diff -Nru r-cran-lavaan-0.5.22/debian/changelog r-cran-lavaan-0.5.23.1097/debian/changelog --- r-cran-lavaan-0.5.22/debian/changelog 2017-09-27 14:07:25.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/changelog 2017-09-30 06:00:39.000000000 +0000 @@ -1,8 +1,11 @@ -r-cran-lavaan (0.5.22-1build1) artful; urgency=medium +r-cran-lavaan (0.5.23.1097-1) unstable; urgency=medium - * No-change rebuild to pick up r-api-3.4 + * Team upload + * New upstream version + * Standards-Version: 4.1.1 + * Add README.source to document binary data files - -- Graham Inggs <ginggs@debian.org> Wed, 27 Sep 2017 14:07:25 +0000 + -- Andreas Tille <tille@debian.org> Sat, 30 Sep 2017 08:00:39 +0200 r-cran-lavaan (0.5.22-1) unstable; urgency=medium diff -Nru r-cran-lavaan-0.5.22/debian/control r-cran-lavaan-0.5.23.1097/debian/control --- r-cran-lavaan-0.5.22/debian/control 2016-11-29 17:40:14.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/control 2017-09-30 06:00:39.000000000 +0000 @@ -9,8 +9,9 @@ r-cran-mass, r-cran-mnormt, r-cran-pbivnorm, - r-cran-quadprog -Standards-Version: 3.9.8 + r-cran-quadprog, + r-cran-numderiv +Standards-Version: 4.1.1 Vcs-Browser: https://anonscm.debian.org/cgit/debian-science/packages/r-cran-lavaan.git Vcs-Git: https://anonscm.debian.org/git/debian-science/packages/r-cran-lavaan.git Homepage: https://cran.r-project.org/package=lavaan diff -Nru r-cran-lavaan-0.5.22/debian/docs r-cran-lavaan-0.5.23.1097/debian/docs --- r-cran-lavaan-0.5.22/debian/docs 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/docs 2017-09-30 06:00:39.000000000 +0000 @@ -0,0 +1,3 @@ +debian/tests/run-unit-test +debian/README.test +tests diff -Nru r-cran-lavaan-0.5.22/debian/README.source r-cran-lavaan-0.5.23.1097/debian/README.source --- r-cran-lavaan-0.5.22/debian/README.source 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/README.source 2017-09-30 06:00:39.000000000 +0000 @@ -0,0 +1,36 @@ +Explanation for binary files inside source package according to + http://lists.debian.org/debian-devel/2013/09/msg00332.html + +Files: data/Demo.growth.rda +Documented: man/Demo.growth.Rd + A toy dataset containing measures on 4 time points (t1, t2, t3 and t4), + two predictors (x1 and x2) influencing the random intercept and slope, and + a time-varying covariate (c1, c2, c3 and c4). + +Files: data/FacialBurns.rda +Documented: man/FacialBurns.Rd + A dataset from the Dutch burn center (http://www.adbc.nl). + The data were used to examine psychosocial functioning in patients with + facial burn wounds. Psychosocial functioning was measured by + anxiety and depression symptoms (HADS), and self-esteem + (Rosenberg's self-esteem scale). + +Files: data/HolzingerSwineford1939.rda +Documented: man/HolzingerSwineford1939.Rd + The classic Holzinger and Swineford (1939) dataset consists of mental + ability test scores of seventh- and eighth-grade children from two + different schools (Pasteur and Grant-White). In the original dataset + (available in the \code{MBESS} package), there are scores for 26 tests. + However, a smaller subset with 9 variables is more widely used in the + literature (for example in Joreskog's 1969 paper, which also uses the 145 + subjects from the Grant-White school only). + +Files: data/PoliticalDemocracy.rda +Documented: man/PoliticalDemocracy.Rd + The `famous' Industrialization and Political Democracy dataset. This dataset is + used throughout Bollen's 1989 book (see pages 12, 17, 36 in chapter 2, pages + 228 and following in chapter 7, pages 321 and following in chapter 8).
+ The dataset contains various measures of political democracy and + industrialization in developing countries. + + -- Andreas Tille <tille@debian.org> Sat, 30 Sep 2017 07:50:08 +0200 diff -Nru r-cran-lavaan-0.5.22/debian/README.test r-cran-lavaan-0.5.23.1097/debian/README.test --- r-cran-lavaan-0.5.22/debian/README.test 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/README.test 2017-09-30 06:00:39.000000000 +0000 @@ -0,0 +1,8 @@ +Notes on how this package can be tested. +──────────────────────────────────────── + +This package can be tested by running the provided test: + + sh run-unit-test + +in order to confirm its integrity. diff -Nru r-cran-lavaan-0.5.22/debian/tests/control r-cran-lavaan-0.5.23.1097/debian/tests/control --- r-cran-lavaan-0.5.22/debian/tests/control 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/tests/control 2017-09-30 06:00:39.000000000 +0000 @@ -0,0 +1,5 @@ +Tests: run-unit-test +Depends: @, r-cran-testthat, +Restrictions: allow-stderr + + diff -Nru r-cran-lavaan-0.5.22/debian/tests/run-unit-test r-cran-lavaan-0.5.23.1097/debian/tests/run-unit-test --- r-cran-lavaan-0.5.22/debian/tests/run-unit-test 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/debian/tests/run-unit-test 2017-09-30 06:00:39.000000000 +0000 @@ -0,0 +1,17 @@ +#!/bin/sh -e + +pkgname=lavaan +debname=r-cran-lavaan + +if [ "$ADTTMP" = "" ] ; then + ADTTMP=`mktemp -d /tmp/${debname}-test.XXXXXX` + trap "rm -rf $ADTTMP" 0 INT QUIT ABRT PIPE TERM +fi +cd $ADTTMP +cp -a /usr/share/doc/$debname/tests/* $ADTTMP +gunzip -r * +for testfile in *.R; do + echo "BEGIN TEST $testfile" + LC_ALL=C R --no-save < $testfile +done + diff -Nru r-cran-lavaan-0.5.22/DESCRIPTION r-cran-lavaan-0.5.23.1097/DESCRIPTION --- r-cran-lavaan-0.5.22/DESCRIPTION 2016-09-24 16:02:02.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/DESCRIPTION 2017-02-24 22:28:29.000000000 +0000 @@ -1,6 +1,6 @@ Package: lavaan Title: Latent Variable Analysis -Version: 0.5-22 +Version: 0.5-23.1097 Authors@R: c(person(given = "Yves", family = "Rosseel", role = c("aut", "cre"), email = "Yves.Rosseel@UGent.be"), @@ -30,18 +30,24 @@ email = "mirtok2@gmail.com"), person(given = "Mariska", family = "Barendse", role = "ctb", - email = "m.t.barendse@rug.nl") + email = "m.t.barendse@rug.nl"), + person(given = "Michael", family = "Chow", + role = "ctb", + email = "machow@princeton.edu"), + person(given = "Terrence", family = "Jorgensen", + role = "ctb") ) Description: Fit a variety of latent variable models, including confirmatory factor analysis, structural equation modeling and latent growth curve models.
Depends: R(>= 3.1.0) Imports: methods, stats4, stats, utils, graphics, MASS, mnormt, - pbivnorm, quadprog + pbivnorm, quadprog, numDeriv +Suggests: testthat License: GPL (>= 2) LazyData: yes URL: http://lavaan.org NeedsCompilation: no -Packaged: 2016-09-24 13:52:10 UTC; yves +Packaged: 2017-02-24 13:08:31 UTC; yves Author: Yves Rosseel [aut, cre], Daniel Oberski [ctb], Jarrett Byrnes [ctb], @@ -51,7 +57,9 @@ Michael Hallquist [ctb], Mijke Rhemtulla [ctb], Myrsini Katsikatsou [ctb], - Mariska Barendse [ctb] + Mariska Barendse [ctb], + Michael Chow [ctb], + Terrence Jorgensen [ctb] Maintainer: Yves Rosseel <Yves.Rosseel@UGent.be> Repository: CRAN -Date/Publication: 2016-09-24 18:02:02 +Date/Publication: 2017-02-24 23:28:29 diff -Nru r-cran-lavaan-0.5.22/man/cfa.Rd r-cran-lavaan-0.5.23.1097/man/cfa.Rd --- r-cran-lavaan-0.5.22/man/cfa.Rd 2016-09-03 09:24:43.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/cfa.Rd 2017-01-28 12:43:17.000000000 +0000 @@ -4,25 +4,12 @@ \description{ Fit a Confirmatory Factor Analysis (CFA) model.} \usage{ -cfa(model = NULL, data = NULL, - meanstructure = "default", - conditional.x = "default", fixed.x = "default", - orthogonal = FALSE, std.lv = FALSE, - parameterization = "default", std.ov = FALSE, - missing = "default", ordered = NULL, - sample.cov = NULL, sample.cov.rescale = "default", - sample.mean = NULL, sample.nobs = NULL, - ridge = 1e-05, group = NULL, - group.label = NULL, group.equal = "", group.partial = "", - group.w.free = FALSE, cluster = NULL, constraints = '', - estimator = "default", likelihood = "default", link = "default", - information = "default", se = "default", test = "default", - bootstrap = 1000L, mimic = "default", representation = "default", - do.fit = TRUE, control = list(), WLS.V = NULL, NACOV = NULL, - zero.add = "default", zero.keep.margins = "default", - zero.cell.warn = TRUE, start = "default", - check = c("start", "post"), - verbose = FALSE, warn = TRUE, debug = FALSE) +cfa(model = NULL, data = NULL, ordered = NULL, + sample.cov = NULL, sample.mean = NULL, sample.nobs = NULL, + group = NULL, cluster = NULL, + constraints = "", WLS.V = NULL, NACOV = NULL, + ...) + } \arguments{ \item{model}{A description of the user-specified model. Typically, the model @@ -33,41 +20,6 @@ \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} -\item{meanstructure}{If \code{TRUE}, the means of the observed - variables enter the model. If \code{"default"}, the value is set based - on the user-specified model, and/or the values of other arguments.} -\item{conditional.x}{If \code{TRUE}, we set up the model conditional on - the exogenous `x' covariates; the model-implied sample statistics - only include the non-x variables. If \code{FALSE}, the exogenous `x' - variables are modeled jointly with the other variables, and the - model-implied statistics refect both sets of variables. If - \code{"default"}, the value is set depending on the estimator, and - whether or not the model involves categorical endogenous variables.} -\item{fixed.x}{If \code{TRUE}, the exogenous `x' covariates are considered - fixed variables and the means, variances and covariances of these variables - are fixed to their sample values. If \code{FALSE}, they are considered - random, and the means, variances and covariances are free parameters.
If - \code{"default"}, the value is set depending on the mimic option.} -\item{orthogonal}{If \code{TRUE}, the exogenous latent variables - are assumed to be uncorrelated.} -\item{std.lv}{If \code{TRUE}, the metric of each latent variable is - determined by fixing their (residual) variances to 1.0. If \code{FALSE}, - the metric - of each latent variable is determined by fixing the factor loading of the - first indicator to 1.0.} -\item{parameterization}{Currently only used if data is categorical. If - \code{"delta"}, the delta parameterization is used. If \code{"theta"}, - the theta parameterization is used.} -\item{std.ov}{If \code{TRUE}, all observed variables are standardized - before entering the analysis.} -\item{missing}{If \code{"listwise"}, cases with missing values are removed - listwise from the data frame before analysis. If \code{"direct"} or - \code{"ml"} or \code{"fiml"} and the estimator is maximum likelihood, - Full Information Maximum Likelihood (FIML) estimation is used using all - available data in the data frame. This is only valid if the data are - missing completely at random (MCAR) or missing at random (MAR). If - \code{"default"}, the value is set depending on the estimator and the - mimic option.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated @@ -80,133 +32,16 @@ internally rescaled by multiplying it with a factor (N-1)/N, to ensure that the covariance matrix has been divided by N. This can be turned off by setting the \code{sample.cov.rescale} argument to \code{FALSE}.} -\item{sample.cov.rescale}{If \code{TRUE}, the sample covariance matrix provided - by the user is internally rescaled by multiplying it with a factor (N-1)/N. - If \code{"default"}, the value is set depending on the estimator and the - likelihood option: it is set to \code{TRUE} if maximum likelihood - estimation is used and \code{likelihood="normal"}, and \code{FALSE} - otherwise.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} -\item{ridge}{Numeric. Small constant used for ridging. Only used if the sample covariance matrix is non positive definite.} \item{group}{A variable name in the data frame defining the groups in a multiple group analysis.} -\item{group.label}{A character vector. The user can specify which group (or -factor) levels need to be selected from the grouping variable, and in which -order. If \code{NULL} (the default), all grouping levels are selected, in the -order as they appear in the data.} -\item{group.equal}{A vector of character strings. Only used in - a multiple group analysis. Can be one or more of the following: - \code{"loadings"}, \code{"intercepts"}, \code{"means"}, \code{"thresholds"}, - \code{"regressions"}, \code{"residuals"}, - \code{"residual.covariances"}, \code{"lv.variances"} or - \code{"lv.covariances"}, specifying the pattern of equality - constraints across multiple groups.} -\item{group.partial}{A vector of character strings containing the labels - of the parameters which should be free in all groups (thereby - overriding the group.equal argument for some specific parameters).} -\item{group.w.free}{Logical. 
If \code{TRUE}, the group frequencies are - considered to be free parameters in the model. In this case, a - Poisson model is fitted to estimate the group frequencies. If - \code{FALSE} (the default), the group frequencies are fixed to their - observed values.} \item{cluster}{Not used yet.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} -\item{estimator}{The estimator to be used. Can be one of the following: - \code{"ML"} for maximum likelihood, \code{"GLS"} for generalized least - squares, \code{"WLS"} for weighted least squares (sometimes called ADF - estimation), \code{"ULS"} for unweighted least squares and \code{"DWLS"} for - diagonally weighted least squares. These are the main options that affect - the estimation. For convenience, the \code{"ML"} option can be extended - as \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, \code{"MLF"}, and - \code{"MLR"}. The estimation will still be plain \code{"ML"}, but now - with robust standard errors and a robust (scaled) test statistic. For - \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, classic robust standard - errors are used (\code{se="robust.sem"}); for \code{"MLF"}, standard - errors are based on first-order derivatives (\code{se="first.order"}); - for \code{"MLR"}, `Huber-White' robust standard errors are used - (\code{se="robust.huber.white"}). In addition, \code{"MLM"} will compute - a Satorra-Bentler scaled (mean adjusted) test statistic - (\code{test="satorra.bentler"}) , \code{"MLMVS"} will compute a - mean and variance adjusted test statistic (Satterthwaite style) - (\code{test="mean.var.adjusted"}), \code{"MLMV"} will compute a mean - and variance adjusted test statistic (scaled and shifted) - (\code{test="scaled.shifted"}), and \code{"MLR"} will - compute a test statistic which is asymptotically - equivalent to the Yuan-Bentler T2-star test statistic. Analogously, - the estimators \code{"WLSM"} and \code{"WLSMV"} imply the \code{"DWLS"} - estimator (not the \code{"WLS"} estimator) with robust standard errors - and a mean or mean and variance adjusted test statistic. Estimators - \code{"ULSM"} and \code{"ULSMV"} imply the \code{"ULS"} - estimator with robust standard errors - and a mean or mean and variance adjusted test statistic.} -\item{likelihood}{Only relevant for ML estimation. If \code{"wishart"}, - the wishart likelihood approach is used. In this approach, the covariance - matrix has been divided by N-1, and both standard errors and test - statistics are based on N-1. - If \code{"normal"}, the normal likelihood approach is used. Here, - the covariance matrix has been divided by N, and both standard errors - and test statistics are based on N. If \code{"default"}, it depends - on the mimic option: if \code{mimic="lavaan"} or \code{mimic="Mplus"}, - normal likelihood is used; otherwise, wishart likelihood is used.} -\item{link}{Currently only used if estimator is MML. If \code{"logit"}, - a logit link is used for binary and ordered observed variables. - If \code{"probit"}, a probit link is used. If \code{"default"}, - it is currently set to \code{"probit"} (but this may change).} -\item{information}{If \code{"expected"}, the expected information matrix - is used (to compute the standard errors). If \code{"observed"}, the - observed information matrix is used. 
If \code{"default"}, the value is - set depending on the estimator and the mimic option.} -\item{se}{If \code{"standard"}, conventional standard errors - are computed based on inverting the (expected or observed) information - matrix. If \code{"first.order"}, standard errors are computed based on - first-order derivatives. If \code{"robust.sem"}, conventional robust - standard errors are computed. If \code{"robust.huber.white"}, - standard errors are computed based on the `mlr' (aka pseudo ML, - Huber-White) approach. - If \code{"robust"}, either \code{"robust.sem"} or - \code{"robust.huber.white"} is used depending on the estimator, - the mimic option, and whether the data are complete or not. - If \code{"boot"} or \code{"bootstrap"}, bootstrap standard errors are - computed using standard bootstrapping (unless Bollen-Stine bootstrapping - is requested for the test statistic; in this case bootstrap standard - errors are computed using model-based bootstrapping). - If \code{"none"}, no standard errors are computed.} -\item{test}{If \code{"standard"}, a conventional chi-square test is computed. - If \code{"Satorra.Bentler"}, a Satorra-Bentler scaled test statistic is - computed. If \code{"Yuan.Bentler"}, a Yuan-Bentler scaled test statistic - is computed. If \code{"mean.var.adjusted"} or \code{"Satterthwaite"}, a - mean and variance adjusted test statistic is compute. - If \code{"scaled.shifted"}, an alternative mean and variance adjusted test - statistic is computed (as in Mplus version 6 or higher). - If \code{"boot"} or \code{"bootstrap"} or - \code{"Bollen.Stine"}, the Bollen-Stine bootstrap is used to compute - the bootstrap probability value of the test statistic. - If \code{"default"}, the value depends on the - values of other arguments.} -\item{bootstrap}{Number of bootstrap draws, if bootstrapping is used.} -\item{mimic}{If \code{"Mplus"}, an attempt is made to mimic the Mplus - program. If \code{"EQS"}, an attempt is made to mimic the EQS program. - If \code{"default"}, the value is (currently) set to \code{"lavaan"}, - which is very close to \code{"Mplus"}.} -\item{representation}{If \code{"LISREL"} the classical LISREL matrix - representation is used to represent the model (using the all-y variant).} -\item{do.fit}{If \code{FALSE}, the model is not fit, and the current - starting values of the model parameters are preserved.} -\item{control}{A list containing control parameters passed to the optimizer. - By default, lavaan uses \code{"nlminb"}. See the manpage of - \code{\link{nlminb}} for an overview of the control parameters. - A different optimizer can be chosen by setting the value of - \code{optim.method}. For unconstrained optimization (the model syntax - does not include any "==", ">" or "<" operators), - the available options are \code{"nlminb"} (the default), \code{"BFGS"} and - \code{"L-BFGS-B"}. See the manpage of the \code{\link{optim}} function for - the control parameters of the latter two options. For constrained - optimization, the only available option is \code{"nlminb.constr"}.} \item{WLS.V}{A user provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix @@ -224,47 +59,8 @@ For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} -\item{zero.add}{A numeric vector containing two values. 
These values affect the - calculation of polychoric correlations when some frequencies in the - bivariate table are zero. - The first value only applies for 2x2 tables. The second value for larger - tables. This value is added to the zero frequency in the bivariate table. - If \code{"default"}, the value is set depending on the \code{"mimic"} - option. By default, lavaan uses \code{zero.add = c(0.5. 0.0)}.} -\item{zero.keep.margins}{Logical. This argument only affects the computation - of polychoric correlations for 2x2 tables with an empty cell, and where a - value is added to the empty cell. If \code{TRUE}, the other values of the - frequency table are adjusted so that all margins are unaffected. If - \code{"default"}, the value is set depending on the \code{"mimic"}. The - default is \code{TRUE}.} -\item{zero.cell.warn}{Logical. Only used if some observed endogenous variables - are categorical. If \code{TRUE}, give a warning if one or more cells - of a bivariate frequency table are empty.} -\item{start}{If it is a character string, - the two options are currently \code{"simple"} and \code{"Mplus"}. - In the first - case, all parameter values are set to zero, except the factor loadings - (set to one), the variances of latent variables (set to 0.05), and - the residual variances of observed variables (set to half the observed - variance). - If \code{"Mplus"}, we use a similar scheme, but the factor loadings are - estimated using the fabin3 estimator (tsls) per factor. - If \code{start} is a fitted - object of class \code{\linkS4class{lavaan}}, the estimated values of - the corresponding parameters will be extracted. If it is a model list, - for example the output of the \code{paramaterEstimates()} function, - the values of the \code{est} or \code{start} or \code{ustart} column - (whichever is found first) will be extracted.} -\item{check}{Character vector. If \code{check} includes \code{"start"}, - the starting values are checked for possibly inconsistent values (for - example values implying correlations larger than one); - if \code{check} includes \code{"post"}, a check is performed after - (post) fitting, to check if the solution is admissable.} -\item{verbose}{If \code{TRUE}, the function value is printed out during - each iteration.} -\item{warn}{If \code{TRUE}, some (possibly harmless) warnings are printed - out during the iterations.} -\item{debug}{If \code{TRUE}, debugging information is printed out.} +\item{...}{Many more additional options can be defined, using 'name = value'. + See \code{\link{lavOptions}} for a complete list.} } \details{ The \code{cfa} function is a wrapper for the more general diff -Nru r-cran-lavaan-0.5.22/man/fsr.Rd r-cran-lavaan-0.5.23.1097/man/fsr.Rd --- r-cran-lavaan-0.5.22/man/fsr.Rd 2016-07-03 18:43:51.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/fsr.Rd 2017-02-18 10:18:54.000000000 +0000 @@ -5,7 +5,8 @@ Fit a SEM model using factor score regression.} \usage{ fsr(model = NULL, data = NULL, cmd = "sem", - fsr.method = "Croon", fs.method = "Bartlett", ...) + fsr.method = "Croon", fs.method = "Bartlett", + fs.scores = FALSE, Gamma.NT = TRUE, lvinfo = FALSE, ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model @@ -23,6 +24,14 @@ options are \code{naive}, \code{Skrondal-Laake}, and \code{Croon}.} \item{fs.method}{Character. Factor score estimation method. Possible options are \code{Bartlett} and \code{regression}.} +\item{fs.scores}{Logical. 
If \code{TRUE}, explicitly compute factor scores; if +\code{FALSE}, only compute the mean vector and variance matrix of the +factor scores.} +\item{Gamma.NT}{Logical. Only needed when \code{se="robust.sem"} and +data is missing; if \code{TRUE}, compute Gamma (N times the variance +matrix of the sample statistics) assuming normality.} +\item{lvinfo}{Logical. If \code{TRUE}, return latent variable information +as an attribute to the output.} \item{...}{Further arguments that we pass to the \code{"cfa"}, \code{"sem"} or \code{"lavaan"} functions.} } diff -Nru r-cran-lavaan-0.5.22/man/growth.Rd r-cran-lavaan-0.5.23.1097/man/growth.Rd --- r-cran-lavaan-0.5.22/man/growth.Rd 2016-09-03 09:25:11.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/growth.Rd 2017-01-29 10:22:24.000000000 +0000 @@ -4,24 +4,11 @@ \description{ Fit a Growth Curve model.} \usage{ -growth(model = NULL, data = NULL, - conditional.x = "default", fixed.x = "default", - orthogonal = FALSE, std.lv = FALSE, - parameterization = "default", std.ov = FALSE, - missing = "default", ordered = NULL, - sample.cov = NULL, sample.cov.rescale = "default", - sample.mean = NULL, sample.nobs = NULL, - ridge = 1e-05, group = NULL, - group.label = NULL, group.equal = "", group.partial = "", - group.w.free = FALSE, cluster = NULL, constraints = '', - estimator = "default", likelihood = "default", link = "default", - information = "default", se = "default", test = "default", - bootstrap = 1000L, mimic = "default", representation = "default", - do.fit = TRUE, control = list(), WLS.V = NULL, NACOV = NULL, - zero.add = "default", zero.keep.margins = "default", - zero.cell.warn = TRUE, start = "default", - check = c("start", "post"), - verbose = FALSE, warn = TRUE, debug = FALSE) +growth(model = NULL, data = NULL, ordered = NULL, + sample.cov = NULL, sample.mean = NULL, sample.nobs = NULL, + group = NULL, cluster = NULL, + constraints = "", WLS.V = NULL, NACOV = NULL, + ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model @@ -32,38 +19,6 @@ \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} -\item{conditional.x}{If \code{TRUE}, we set up the model conditional on - the exogenous `x' covariates; the model-implied sample statistics - only include the non-x variables. If \code{FALSE}, the exogenous `x' - variables are modeled jointly with the other variables, and the - model-implied statistics refect both sets of variables. If - \code{"default"}, the value is set depending on the estimator, and - whether or not the model involves categorical endogenous variables.} -\item{fixed.x}{If \code{TRUE}, the exogenous `x' covariates are considered - fixed variables and the means, variances and covariances of these variables - are fixed to their sample values. If \code{FALSE}, they are considered - random, and the means, variances and covariances are free parameters. If - \code{"default"}, the value is set depending on the mimic option.} -\item{orthogonal}{If \code{TRUE}, the exogenous latent variables - are assumed to be uncorrelated.} -\item{std.lv}{If \code{TRUE}, the metric of each latent variable is - determined by fixing their (residual) - variances to 1.0. If \code{FALSE}, the metric - of each latent variable is determined by fixing the factor loading of the - first indicator to 1.0.} -\item{parameterization}{Currently only used if data is categorical. 
If - \code{"delta"}, the delta parameterization is used. If \code{"theta"}, - the theta parameterization is used.} -\item{std.ov}{If \code{TRUE}, all observed variables are standardized - before entering the analysis.} -\item{missing}{If \code{"listwise"}, cases with missing values are removed - listwise from the data frame before analysis. If \code{"direct"} or - \code{"ml"} or \code{"fiml"} and the estimator is maximum likelihood, - Full Information Maximum Likelihood (FIML) estimation is used using all - available data in the data frame. This is only valid if the data are - missing completely at random (MCAR) or missing at random (MAR). If - \code{"default"}, the value is set depending on the estimator and the - mimic option.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated @@ -76,121 +31,16 @@ internally rescaled by multiplying it with a factor (N-1)/N, to ensure that the covariance matrix has been divided by N. This can be turned off by setting the \code{sample.cov.rescale} argument to \code{FALSE}.} -\item{sample.cov.rescale}{If \code{TRUE}, the sample covariance matrix provided - by the user is internally rescaled by multiplying it with a factor (N-1)/N. - If \code{"default"}, the value is set depending on the estimator and the - likelihood option: it is set to \code{TRUE} if maximum likelihood - estimation is used and \code{likelihood="normal"}, and \code{FALSE} - otherwise.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} -\item{ridge}{Numeric. Small constant used for ridging. Only used if the sample covariance matrix is non positive definite.} \item{group}{A variable name in the data frame defining the groups in a multiple group analysis.} -\item{group.label}{A character vector. The user can specify which group (or -factor) levels need to be selected from the grouping variable, and in which -order. If \code{NULL} (the default), all grouping levels are selected, in the -order as they appear in the data.} -\item{group.equal}{A vector of character strings. Only used in - a multiple group analysis. Can be one or more of the following: - \code{"loadings"}, \code{"intercepts"}, \code{"means"},\code{"thresholds"}, - \code{"regressions"}, \code{"residuals"}, - \code{"residual.covariances"}, \code{"lv.variances"} or - \code{"lv.covariances"}, specifying the pattern of equality - constraints across multiple groups.} -\item{group.partial}{A vector of character strings containing the labels - of the parameters which should be free in all groups (thereby - overriding the group.equal argument for some specific parameters).} -\item{group.w.free}{Logical. If \code{TRUE}, the group frequencies are - considered to be free parameters in the model. In this case, a - Poisson model is fitted to estimate the group frequencies. If - \code{FALSE} (the default), the group frequencies are fixed to their - observed values.} \item{cluster}{Not used yet.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} -\item{estimator}{The estimator to be used. 
Can be one of the following: - \code{"ML"} for maximum likelihood, \code{"GLS"} for generalized least - squares, \code{"WLS"} for weighted least squares (sometimes called ADF - estimation), \code{"ULS"} for unweighted least squares and \code{"DWLS"} for - diagonally weighted least squares. These are the main options that affect - the estimation. For convenience, the \code{"ML"} option can be extended - as \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, \code{"MLF"}, and - \code{"MLR"}. The estimation will still be plain \code{"ML"}, but now - with robust standard errors and a robust (scaled) test statistic. For - \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, classic robust standard - errors are used (\code{se="robust.sem"}); for \code{"MLF"}, standard - errors are based on first-order derivatives (\code{se="first.order"}); - for \code{"MLR"}, `Huber-White' robust standard errors are used - (\code{se="robust.huber.white"}). In addition, \code{"MLM"} will compute - a Satorra-Bentler scaled (mean adjusted) test statistic - (\code{test="satorra.bentler"}) , \code{"MLMVS"} will compute a - mean and variance adjusted test statistic (Satterthwaite style) - (\code{test="mean.var.adjusted"}), \code{"MLMV"} will compute a mean - and variance adjusted test statistic (scaled and shifted) - (\code{test="scaled.shifted"}), and \code{"MLR"} will - compute a test statistic which is asymptotically - equivalent to the Yuan-Bentler T2-star test statistic. Analogously, - the estimators \code{"WLSM"} and \code{"WLSMV"} imply the \code{"DWLS"} - estimator (not the \code{"WLS"} estimator) with robust standard errors - and a mean or mean and variance adjusted test statistic. Estimators - \code{"ULSM"} and \code{"ULSMV"} imply the \code{"ULS"} - estimator with robust standard errors - and a mean or mean and variance adjusted test statistic.} -\item{likelihood}{Only relevant for ML estimation. If \code{"wishart"}, - the wishart likelihood approach is used. In this approach, the covariance - matrix has been divided by N-1, and both standard errors and test - statistics are based on N-1. - If \code{"normal"}, the normal likelihood approach is used. Here, - the covariance matrix has been divided by N, and both standard errors - and test statistics are based on N. If \code{"default"}, it depends - on the mimic option: if \code{mimic="lavaan"} or \code{mimic="Mplus"}, - normal likelihood is used; otherwise, wishart likelihood is used.} -\item{link}{Currently only used if estimator is MML. If \code{"logit"}, - a logit link is used for binary and ordered observed variables. - If \code{"probit"}, a probit link is used. If \code{"default"}, - it is currently set to \code{"probit"} (but this may change).} -\item{information}{If \code{"expected"}, the expected information matrix - is used (to compute the standard errors). If \code{"observed"}, the - observed information matrix is used. If \code{"default"}, the value is - set depending on the estimator and the mimic option.} -\item{se}{If \code{"standard"}, conventional standard errors - are computed based on inverting the (expected or observed) information - matrix. If \code{"first.order"}, standard errors are computed based on - first-order derivatives. If \code{"robust.sem"}, conventional robust - standard errors are computed. If \code{"robust.huber.white"}, - standard errors are computed based on the `mlr' (aka pseudo ML, - Huber-White) approach. 
- If \code{"robust"}, either \code{"robust.sem"} or - \code{"robust.huber.white"} is used depending on the estimator, - the mimic option, and whether the data are complete or not. - If \code{"boot"} or \code{"bootstrap"}, bootstrap standard errors are - computed using standard bootstrapping (unless Bollen-Stine bootstrapping - is requested for the test statistic; in this case bootstrap standard - errors are computed using model-based bootstrapping). - If \code{"none"}, no standard errors are computed.} -\item{test}{If \code{"standard"}, a conventional chi-square test is computed. - If \code{"Satorra.Bentler"}, a Satorra-Bentler scaled test statistic is - computed. If \code{"Yuan.Bentler"}, a Yuan-Bentler scaled test statistic - is computed. If \code{"mean.var.adjusted"} or \code{"Satterthwaite"}, a - mean and variance adjusted test statistic is compute. - If \code{"scaled.shifted"}, an alternative mean and variance adjusted test - statistic is computed (as in Mplus version 6 or higher). - If \code{"boot"} or \code{"bootstrap"} or - \code{"Bollen.Stine"}, the Bollen-Stine bootstrap is used to compute - the bootstrap probability value of the test statistic. - If \code{"default"}, the value depends on the - values of other arguments.} -\item{bootstrap}{Number of bootstrap draws, if bootstrapping is used.} -\item{mimic}{If \code{"Mplus"}, an attempt is made to mimic the Mplus - program. If \code{"EQS"}, an attempt is made to mimic the EQS program. - If \code{"default"}, the value is (currently) set to to \code{"lavaan"}, - which is very close to\code{"Mplus"}.} -\item{representation}{If \code{"LISREL"} the classical LISREL matrix - representation is used to represent the model (using the all-y variant).} \item{WLS.V}{A user provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix @@ -208,59 +58,8 @@ For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} -\item{zero.add}{A numeric vector containing two values. These values affect the - calculation of polychoric correlations when some frequencies in the - bivariate table are zero. - The first value only applies for 2x2 tables. The second value for larger - tables. This value is added to the zero frequency in the bivariate table. - If \code{"default"}, the value is set depending on the \code{"mimic"} - option. By default, lavaan uses \code{zero.add = c(0.5. 0.0)}.} -\item{zero.keep.margins}{Logical. This argument only affects the computation - of polychoric correlations for 2x2 tables with an empty cell, and where a - value is added to the empty cell. If \code{TRUE}, the other values of the - frequency table are adjusted so that all margins are unaffected. If - \code{"default"}, the value is set depending on the \code{"mimic"}. The - default is \code{TRUE}.} -\item{zero.cell.warn}{Logical. Only used if some observed endogenous variables - are categorical. If \code{TRUE}, give a warning if one or more cells - of a bivariate frequency table are empty.} -\item{start}{If it is a character string, - the two options are currently \code{"simple"} and \code{"Mplus"}. 
- In the first - case, all parameter values are set to zero, except the factor loadings - (set to one), the variances of latent variables (set to 0.05), and - the residual variances of observed variables (set to half the observed - variance). - If \code{"Mplus"}, we use a similar scheme, but the factor loadings are - estimated using the fabin3 estimator (tsls) per factor. - If \code{start} is a fitted - object of class \code{\linkS4class{lavaan}}, the estimated values of - the corresponding parameters will be extracted. If it is a model list, - for example the output of the \code{paramaterEstimates()} function, - the values of the \code{est} or \code{start} or \code{ustart} column - (whichever is found first) will be extracted.} -\item{do.fit}{If \code{FALSE}, the model is not fit, and the current - starting values of the model parameters are preserved.} -\item{control}{A list containing control parameters passed to the optimizer. - By default, lavaan uses \code{"nlminb"}. See the manpage of - \code{\link{nlminb}} for an overview of the control parameters. - A different optimizer can be chosen by setting the value of - \code{optim.method}. For unconstrained optimization (the model syntax - does not include any "==", ">" or "<" operators), - the available options are \code{"nlminb"} (the default), \code{"BFGS"} and - \code{"L-BFGS-B"}. See the manpage of the \code{\link{optim}} function for - the control parameters of the latter two options. For constrained - optimization, the only available option is \code{"nlminb.constr"}.} -\item{check}{Character vector. If \code{check} includes \code{"start"}, - the starting values are checked for possibly inconsistent values (for - example values implying correlations larger than one); - if \code{check} includes \code{"post"}, a check is performed after - (post) fitting, to check if the solution is admissable.} -\item{verbose}{If \code{TRUE}, the function value is printed out during - each iteration.} -\item{warn}{If \code{TRUE}, some (possibly harmless) warnings are printed - out during the iterations.} -\item{debug}{If \code{TRUE}, debugging information is printed out.} +\item{...}{Many more additional options can be defined, using 'name = value'. + See \code{\link{lavOptions}} for a complete list.} } \details{ The \code{growth} function is a wrapper for the more general diff -Nru r-cran-lavaan-0.5.22/man/inspectSampleCov.Rd r-cran-lavaan-0.5.23.1097/man/inspectSampleCov.Rd --- r-cran-lavaan-0.5.22/man/inspectSampleCov.Rd 2013-02-04 19:19:32.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/inspectSampleCov.Rd 2017-01-31 16:48:52.000000000 +0000 @@ -17,7 +17,7 @@ One must supply both a model, coded with proper \code{\link{model.syntax}} and a data frame from which a covariance matrix will be calculated. This function essentially calls \code{\link{sem}}, but doesn't fit the model, then uses -\code{\link{inspect}} to get the sample covariance matrix and meanstructure. +\code{\link{lavInspect}} to get the sample covariance matrix and meanstructure. 
} -\section{See also}{\code{\link{sem}}, \code{\link{inspect}} } +\section{See also}{\code{\link{sem}}, \code{\link{lavInspect}} } diff -Nru r-cran-lavaan-0.5.22/man/lavaan-class.Rd r-cran-lavaan-0.5.23.1097/man/lavaan-class.Rd --- r-cran-lavaan-0.5.22/man/lavaan-class.Rd 2016-08-28 15:54:52.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavaan-class.Rd 2017-01-31 16:50:27.000000000 +0000 @@ -12,7 +12,6 @@ \alias{nobs} %% not imported in < 2.13 \alias{nobs,lavaan-method} \alias{logLik,lavaan-method} -\alias{inspect,lavaan-method} \alias{update,lavaan-method} \alias{show,lavaan-method} \alias{summary,lavaan-method} @@ -118,9 +117,6 @@ returns the log-likelihood of the fitted model, if maximum likelihood estimation was used. The \code{\link[stats]{AIC}} and \code{\link[stats]{BIC}} methods automatically work via \code{logLik()}.} - \item{inspect}{\code{signature(object = "lavaan", what = "free")}: This - method is now a shortcut for the \code{lavInspect()} function. See - \link{lavInspect} for more details.} \item{show}{\code{signature(object = "lavaan")}: Print a short summary of the model fit} \item{summary}{\code{signature(object = "lavaan", header = TRUE, fit.measures=FALSE, estimates = TRUE, ci = FALSE, fmi = FALSE, standardized = FALSE, rsquare=FALSE, std.nox = FALSE, modindices=FALSE, ci=FALSE, nd = 3L)}: @@ -146,7 +142,7 @@ decimal point to be printed (currently only in the parameter estimates section.) Nothing is returned (use - \code{inspect} or another extractor function + \code{lavInspect} or another extractor function to extract information from a fitted model).} } } @@ -161,7 +157,7 @@ \seealso{ \code{\link{cfa}}, \code{\link{sem}}, \code{\link{growth}}, \code{\link{fitMeasures}}, \code{\link{standardizedSolution}}, -\code{\link{parameterEstimates}}, +\code{\link{parameterEstimates}}, \code{\link{lavInspect}}, \code{\link{modindices}} } \examples{ @@ -172,11 +168,7 @@ fit <- cfa(HS.model, data=HolzingerSwineford1939) summary(fit, standardized=TRUE, fit.measures=TRUE, rsquare=TRUE) -inspect(fit, "free") -inspect(fit, "start") -inspect(fit, "rsquare") -inspect(fit, "fit") -fitted.values(fit) +fitted(fit) coef(fit) resid(fit, type="normalized") } diff -Nru r-cran-lavaan-0.5.22/man/lavaanList.Rd r-cran-lavaan-0.5.23.1097/man/lavaanList.Rd --- r-cran-lavaan-0.5.22/man/lavaanList.Rd 2016-07-19 20:33:45.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavaanList.Rd 2017-01-30 16:16:48.000000000 +0000 @@ -80,7 +80,8 @@ # a data generating function generateData <- function() simulateData(HS.model, sample.nobs = 100) -fit <- semList(HS.model, dataFunction = generateData, ndat = 10, +set.seed(1234) +fit <- semList(HS.model, dataFunction = generateData, ndat = 5, store.slots = "partable") # show parameter estimates, per dataset diff -Nru r-cran-lavaan-0.5.22/man/lavaan.Rd r-cran-lavaan-0.5.23.1097/man/lavaan.Rd --- r-cran-lavaan-0.5.22/man/lavaan.Rd 2016-09-03 09:25:20.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavaan.Rd 2017-01-28 12:42:25.000000000 +0000 @@ -4,30 +4,13 @@ \description{ Fit a latent variable model.} \usage{ -lavaan(model = NULL, data = NULL, - model.type = "sem", meanstructure = "default", - int.ov.free = FALSE, int.lv.free = FALSE, - conditional.x = "default", fixed.x = "default", - orthogonal = FALSE, std.lv = FALSE, - parameterization = "default", auto.fix.first = FALSE, - auto.fix.single = FALSE, auto.var = FALSE, auto.cov.lv.x = FALSE, - auto.cov.y = FALSE, auto.th = FALSE, auto.delta = FALSE, - std.ov = FALSE, missing = "default", ordered = NULL, - 
sample.cov = NULL, sample.cov.rescale = "default", - sample.mean = NULL, sample.nobs = NULL, ridge = 1e-05, - group = NULL, group.label = NULL, group.equal = "", group.partial = "", - group.w.free = FALSE, cluster = NULL, - constraints = "", estimator = "default", - likelihood = "default", link = "default", information = "default", - se = "default", test = "default", bootstrap = 1000L, mimic = "default", - representation = "default", do.fit = TRUE, control = list(), - WLS.V = NULL, NACOV = NULL, - zero.add = "default", zero.keep.margins = "default", - zero.cell.warn = TRUE, start = "default", - slotOptions = NULL, slotParTable = NULL, - slotSampleStats = NULL, slotData = NULL, slotModel = NULL, - slotCache = NULL, check = c("start", "post"), - verbose = FALSE, warn = TRUE, debug = FALSE) +lavaan(model = NULL, data = NULL, ordered = NULL, + sample.cov = NULL, sample.mean = NULL, sample.nobs = NULL, + group = NULL, cluster = NULL, + constraints = "", WLS.V = NULL, NACOV = NULL, + slotOptions = NULL, slotParTable = NULL, slotSampleStats = NULL, + slotData = NULL, slotModel = NULL, slotCache = NULL, + ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model @@ -38,69 +21,11 @@ \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} -\item{model.type}{Set the model type: possible values - are \code{"cfa"}, \code{"sem"} or \code{"growth"}. This may affect - how starting values are computed, and may be used to alter the terminology - used in the summary output, or the layout of path diagrams that are - based on a fitted lavaan object.} -\item{meanstructure}{If \code{TRUE}, the means of the observed - variables enter the model. If \code{"default"}, the value is set based - on the user-specified model, and/or the values of other arguments.} -\item{int.ov.free}{If \code{FALSE}, the intercepts of the observed variables - are fixed to zero.} -\item{int.lv.free}{If \code{FALSE}, the intercepts of the latent variables - are fixed to zero.} -\item{conditional.x}{If \code{TRUE}, we set up the model conditional on - the exogenous `x' covariates; the model-implied sample statistics - only include the non-x variables. If \code{FALSE}, the exogenous `x' - variables are modeled jointly with the other variables, and the - model-implied statistics refect both sets of variables. If - \code{"default"}, the value is set depending on the estimator, and - whether or not the model involves categorical endogenous variables.} -\item{fixed.x}{If \code{TRUE}, the exogenous `x' covariates are considered - fixed variables and the means, variances and covariances of these variables - are fixed to their sample values. If \code{FALSE}, they are considered - random, and the means, variances and covariances are free parameters. If - \code{"default"}, the value is set depending on the mimic option.} -\item{orthogonal}{If \code{TRUE}, the exogenous latent variables - are assumed to be uncorrelated.} -\item{std.lv}{If \code{TRUE}, the metric of each latent variable is - determined by fixing their (residual) - variances to 1.0. If \code{FALSE}, the metric - of each latent variable is determined by fixing the factor loading of the - first indicator to 1.0.} -\item{parameterization}{Currently only used if data is categorical. If - \code{"delta"}, the delta parameterization is used. 
If \code{"theta"}, - the theta parameterization is used.} -\item{auto.fix.first}{If \code{TRUE}, the factor loading of the first indicator - is set to 1.0 for every latent variable.} -\item{auto.fix.single}{If \code{TRUE}, the residual variance (if included) - of an observed indicator is set to zero if it is the only indicator of a - latent variable.} -\item{auto.var}{If \code{TRUE}, the residual variances and the variances - of exogenous latent variables are included in the model and set free.} -\item{auto.cov.lv.x}{If \code{TRUE}, the covariances of exogenous latent - variables are included in the model and set free.} -\item{auto.cov.y}{If \code{TRUE}, the covariances of dependent variables - (both observed and latent) are included in the model and set free.} -\item{auto.th}{If \code{TRUE}, thresholds for limited dependent variables - are included in the model and set free.} -\item{auto.delta}{If \code{TRUE}, response scaling parameters for limited - dependent variables are included in the model and set free.} -\item{std.ov}{If \code{TRUE}, all observed variables are standardized - before entering the analysis.} -\item{missing}{If \code{"listwise"}, cases with missing values are removed - listwise from the data frame before analysis. If \code{"direct"} or - \code{"ml"} or \code{"fiml"} and the estimator is maximum likelihood, - Full Information Maximum Likelihood (FIML) estimation is used using all - available data in the data frame. This is only valid if the data are - missing completely at random (MCAR) or missing at random (MAR). If - \code{"default"}, the value is set depending on the estimator and the - mimic option.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated - as numeric (unless they are declared as ordered in the original data.frame.)} + as numeric (unless they are declared as ordered in the original + data.frame.)} \item{sample.cov}{Numeric matrix. A sample variance-covariance matrix. The rownames and/or colnames must contain the observed variable names. For a multiple group analysis, a list with a variance-covariance matrix @@ -109,133 +34,16 @@ internally rescaled by multiplying it with a factor (N-1)/N, to ensure that the covariance matrix has been divided by N. This can be turned off by setting the \code{sample.cov.rescale} argument to \code{FALSE}.} -\item{sample.cov.rescale}{If \code{TRUE}, the sample covariance matrix provided - by the user is internally rescaled by multiplying it with a factor (N-1)/N. - If \code{"default"}, the value is set depending on the estimator and the - likelihood option: it is set to \code{TRUE} if maximum likelihood - estimation is used and \code{likelihood="normal"}, and \code{FALSE} - otherwise.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} -\item{ridge}{Numeric. Small constant used for ridging. Only used if the sample covariance matrix is non positive definite.} \item{group}{A variable name in the data frame defining the groups in a multiple group analysis.} -\item{group.label}{A character vector. 
The user can specify which group (or -factor) levels need to be selected from the grouping variable, and in which -order. If missing, all grouping levels are selected, in the -order as they appear in the data.} -\item{group.equal}{A vector of character strings. Only used in - a multiple group analysis. Can be one or more of the following: - \code{"loadings"}, \code{"intercepts"}, \code{"means"},\code{"thresholds"}, - \code{"regressions"}, \code{"residuals"}, - \code{"residual.covariances"}, \code{"lv.variances"} or - \code{"lv.covariances"}, specifying the pattern of equality - constraints across multiple groups.} -\item{group.partial}{A vector of character strings containing the labels - of the parameters which should be free in all groups (thereby - overriding the group.equal argument for some specific parameters).} -\item{group.w.free}{Logical. If \code{TRUE}, the group frequencies are - considered to be free parameters in the model. In this case, a - Poisson model is fitted to estimate the group frequencies. If - \code{FALSE} (the default), the group frequencies are fixed to their - observed values.} \item{cluster}{Not used yet.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} -\item{estimator}{The estimator to be used. Can be one of the following: - \code{"ML"} for maximum likelihood, \code{"GLS"} for generalized least - squares, \code{"WLS"} for weighted least squares (sometimes called ADF - estimation), \code{"ULS"} for unweighted least squares and \code{"DWLS"} for - diagonally weighted least squares. These are the main options that affect - the estimation. For convenience, the \code{"ML"} option can be extended - as \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, \code{"MLF"}, and - \code{"MLR"}. The estimation will still be plain \code{"ML"}, but now - with robust standard errors and a robust (scaled) test statistic. For - \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, classic robust standard - errors are used (\code{se="robust.sem"}); for \code{"MLF"}, standard - errors are based on first-order derivatives (\code{se="first.order"}); - for \code{"MLR"}, `Huber-White' robust standard errors are used - (\code{se="robust.huber.white"}). In addition, \code{"MLM"} will compute - a Satorra-Bentler scaled (mean adjusted) test statistic - (\code{test="satorra.bentler"}) , \code{"MLMVS"} will compute a - mean and variance adjusted test statistic (Satterthwaite style) - (\code{test="mean.var.adjusted"}), \code{"MLMV"} will compute a mean - and variance adjusted test statistic (scaled and shifted) - (\code{test="scaled.shifted"}), and \code{"MLR"} will - compute a test statistic which is asymptotically - equivalent to the Yuan-Bentler T2-star test statistic. Analogously, - the estimators \code{"WLSM"} and \code{"WLSMV"} imply the \code{"DWLS"} - estimator (not the \code{"WLS"} estimator) with robust standard errors - and a mean or mean and variance adjusted test statistic. Estimators - \code{"ULSM"} and \code{"ULSMV"} imply the \code{"ULS"} - estimator with robust standard errors - and a mean or mean and variance adjusted test statistic.} -\item{likelihood}{Only relevant for ML estimation. If \code{"wishart"}, - the wishart likelihood approach is used. In this approach, the covariance - matrix has been divided by N-1, and both standard errors and test - statistics are based on N-1. - If \code{"normal"}, the normal likelihood approach is used. 
Here, - the covariance matrix has been divided by N, and both standard errors - and test statistics are based on N. If \code{"default"}, it depends - on the mimic option: if \code{mimic="lavaan"} or \code{mimic="Mplus"}, - normal likelihood is used; otherwise, wishart likelihood is used.} -\item{link}{Currently only used if estimator is MML. If \code{"logit"}, - a logit link is used for binary and ordered observed variables. - If \code{"probit"}, a probit link is used. If \code{"default"}, - it is currently set to \code{"probit"} (but this may change).} -\item{information}{If \code{"expected"}, the expected information matrix - is used (to compute the standard errors). If \code{"observed"}, the - observed information matrix is used. If \code{"default"}, the value is - set depending on the estimator and the mimic option.} -\item{se}{If \code{"standard"}, conventional standard errors - are computed based on inverting the (expected or observed) information - matrix. If \code{"first.order"}, standard errors are computed based on - first-order derivatives. If \code{"robust.sem"}, conventional robust - standard errors are computed. If \code{"robust.huber.white"}, - standard errors are computed based on the `mlr' (aka pseudo ML, - Huber-White) approach. - If \code{"robust"}, either \code{"robust.sem"} or - \code{"robust.huber.white"} is used depending on the estimator, - the mimic option, and whether the data are complete or not. - If \code{"boot"} or \code{"bootstrap"}, bootstrap standard errors are - computed using standard bootstrapping (unless Bollen-Stine bootstrapping - is requested for the test statistic; in this case bootstrap standard - errors are computed using model-based bootstrapping). - If \code{"none"}, no standard errors are computed.} -\item{test}{If \code{"standard"}, a conventional chi-square test is computed. - If \code{"Satorra.Bentler"}, a Satorra-Bentler scaled test statistic is - computed. If \code{"Yuan.Bentler"}, a Yuan-Bentler scaled test statistic - is computed. If \code{"mean.var.adjusted"} or \code{"Satterthwaite"}, a - mean and variance adjusted test statistic is compute. - If \code{"scaled.shifted"}, an alternative mean and variance adjusted test - statistic is computed (as in Mplus version 6 or higher). - If \code{"boot"} or \code{"bootstrap"} or - \code{"Bollen.Stine"}, the Bollen-Stine bootstrap is used to compute - the bootstrap probability value of the test statistic. - If \code{"default"}, the value depends on the - values of other arguments.} -\item{bootstrap}{Number of bootstrap draws, if bootstrapping is used.} -\item{mimic}{If \code{"Mplus"}, an attempt is made to mimic the Mplus - program. If \code{"EQS"}, an attempt is made to mimic the EQS program. - If \code{"default"}, the value is (currently) set to to \code{"lavaan"}, - which is very close to\code{"Mplus"}.} -\item{representation}{If \code{"LISREL"} the classical LISREL matrix - representation is used to represent the model (using the all-y variant).} -\item{do.fit}{If \code{FALSE}, the model is not fit, and the current - starting values of the model parameters are preserved.} -\item{control}{A list containing control parameters passed to the optimizer. - By default, lavaan uses \code{"nlminb"}. See the manpage of - \code{\link{nlminb}} for an overview of the control parameters. - A different optimizer can be chosen by setting the value of - \code{optim.method}. 
For unconstrained optimization (the model syntax - does not include any "==", ">" or "<" operators), - the available options are \code{"nlminb"} (the default), \code{"BFGS"} and - \code{"L-BFGS-B"}. See the manpage of the \code{\link{optim}} function for - the control parameters of the latter two options. For constrained - optimization, the only available option is \code{"nlminb.constr"}.} \item{WLS.V}{A user provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix @@ -253,59 +61,20 @@ For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} -\item{zero.add}{A numeric vector containing two values. These values affect the - calculation of polychoric correlations when some frequencies in the - bivariate table are zero. - The first value only applies for 2x2 tables. The second value for larger - tables. This value is added to the zero frequency in the bivariate table. - If \code{"default"}, the value is set depending on the \code{"mimic"} - option. By default, lavaan uses \code{zero.add = c(0.5. 0.0)}.} -\item{zero.keep.margins}{Logical. This argument only affects the computation - of polychoric correlations for 2x2 tables with an empty cell, and where a - value is added to the empty cell. If \code{TRUE}, the other values of the - frequency table are adjusted so that all margins are unaffected. If - \code{"default"}, the value is set depending on the \code{"mimic"}. The - default is \code{TRUE}.} -\item{zero.cell.warn}{Logical. Only used if some observed endogenous variables - are categorical. If \code{TRUE}, give a warning if one or more cells - of a bivariate frequency table are empty.} -\item{start}{If it is a character string, - the two options are currently \code{"simple"} and \code{"Mplus"}. - In the first - case, all parameter values are set to zero, except the factor loadings - (set to one), the variances of latent variables (set to 0.05), and - the residual variances of observed variables (set to half the observed - variance). - If \code{"Mplus"}, we use a similar scheme, but the factor loadings are - estimated using the fabin3 estimator (tsls) per factor. - If \code{start} is a fitted - object of class \code{\linkS4class{lavaan}}, the estimated values of - the corresponding parameters will be extracted. If it is a model list, - for example the output of the \code{paramaterEstimates()} function, - the values of the \code{est} or \code{start} or \code{ustart} column - (whichever is found first) will be extracted.} \item{slotOptions}{Options slot from a fitted lavaan object. If provided, -no new Options slot will be created by this call.} + no new Options slot will be created by this call.} \item{slotParTable}{ParTable slot from a fitted lavaan object. If provided, -no new ParTable slot will be created by this call.} + no new ParTable slot will be created by this call.} \item{slotSampleStats}{SampleStats slot from a fitted lavaan object. -If provided, no new SampleStats slot will be created by this call.} + If provided, no new SampleStats slot will be created by this call.} \item{slotData}{Data slot from a fitted lavaan object. If provided, -no new Data slot will be created by this call.} + no new Data slot will be created by this call.} \item{slotModel}{Model slot from a fitted lavaan object. 
If provided, -no new Model slot will be created by this call.} + no new Model slot will be created by this call.} \item{slotCache}{Cache slot from a fitted lavaan object. If provided, -no new Cache slot will be created by this call.} -\item{check}{Character vector. If \code{check} includes \code{"start"}, - the starting values are checked for possibly inconsistent values (for - example values implying correlations larger than one); - if \code{check} includes \code{"post"}, a check is performed after - (post) fitting, to check if the solution is admissable.} -\item{verbose}{If \code{TRUE}, the function value is printed out during - each iteration.} -\item{warn}{If \code{TRUE}, some (possibly harmless) warnings are printed - out during the iterations.} -\item{debug}{If \code{TRUE}, debugging information is printed out.} + no new Cache slot will be created by this call.} +\item{...}{Many more additional options can be defined, using 'name = value'. + See \code{\link{lavOptions}} for a complete list.} } \value{ An object of class \code{\linkS4class{lavaan}}, for which several methods diff -Nru r-cran-lavaan-0.5.22/man/lav_constraints.Rd r-cran-lavaan-0.5.23.1097/man/lav_constraints.Rd --- r-cran-lavaan-0.5.22/man/lav_constraints.Rd 2015-03-29 13:35:28.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lav_constraints.Rd 2016-12-30 17:09:14.000000000 +0000 @@ -1,10 +1,19 @@ \name{lav_constraints} \alias{lav_constraints_parse} +\alias{lav_partable_constraints_ceq} +\alias{lav_partable_constraints_ciq} +\alias{lav_partable_constraints_def} \title{Utility Functions: Constraints} \description{Utility functions for equality and inequality constraints.} \usage{ lav_constraints_parse(partable = NULL, constraints = NULL, theta = NULL, debug = FALSE) +lav_partable_constraints_ceq(partable, con = NULL, debug = FALSE, + txtOnly = FALSE) +lav_partable_constraints_ciq(partable, con = NULL, debug = FALSE, + txtOnly = FALSE) +lav_partable_constraints_def(partable, con = NULL, debug = FALSE, + txtOnly = FALSE) } \arguments{ \item{partable}{A lavaan parameter table.} @@ -12,6 +21,9 @@ \item{theta}{A numeric vector. Optional vector with values for the model parameters in the parameter table.} \item{debug}{Logical. If TRUE, show debugging information.} +\item{con}{An optional partable where the operator is one of `==', +`>', `<' or `:='} +\item{txtOnly}{Logical. If TRUE, only the body of the function is returned as a character string. If FALSE, a function is returned.} } \details{ This is a collection of lower-level constraint related functions @@ -20,8 +32,21 @@ they do: The \code{lav_constraints_parse} function parses the constraints -specification (provided as a string, see examples), and generates +specification (provided as a string, see example), and generates a list with useful information about the constraints. + +The \code{lav_partable_constraints_ceq} function creates a function +which takes the (unconstrained) parameter vector as input, and +returns the slack values for each equality constraint. If the equality +constraints hold perfectly, this function returns zeroes. + +The \code{lav_partable_constraints_ciq} function creates a function +which takes the (unconstrained) parameter vector as input, and +returns the slack values for each inequality constraint. + +The \code{lav_partable_constraints_def} function creates a function +which takes the (unconstrained) parameter vector as input, and +returns the computed values of the defined parameters. 
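+ +As a further illustration (a minimal sketch: the model string and the +parameter values below are invented for this example, mirroring the +\code{ceq} example in the examples section), the \code{ciq} and \code{def} +variants are used in exactly the same way: + +\preformatted{ +## one inequality constraint and one defined parameter +myModel3 <- 'x1 ~ a*x2 + b*x3 + c*x4 +             a > b +             abc := a*b*c ' +ciq <- lav_partable_constraints_ciq(partable = lavaanify(myModel3)) +ciq( c(3,2,4) )  # slack value of a > b at (a,b,c) = (3,2,4) +def <- lav_partable_constraints_def(partable = lavaanify(myModel3)) +def( c(3,2,4) )  # value of the defined parameter abc = a*b*c +}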
} \examples{ myModel <- 'x1 ~ a*x2 + b*x3 + c*x4' @@ -29,4 +54,11 @@ con <- ' a == 2*b b - c == 5 ' conInfo <- lav_constraints_parse(myParTable, constraints = con) + +myModel2 <- 'x1 ~ a*x2 + b*x3 + c*x4 + a == 2*b + b - c == 5 ' +ceq <- lav_partable_constraints_ceq(partable = lavaanify(myModel2)) +ceq( c(2,3,4) ) } + diff -Nru r-cran-lavaan-0.5.22/man/lavInspect.Rd r-cran-lavaan-0.5.23.1097/man/lavInspect.Rd --- r-cran-lavaan-0.5.22/man/lavInspect.Rd 2016-07-19 21:16:18.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavInspect.Rd 2017-02-21 16:41:28.000000000 +0000 @@ -6,20 +6,22 @@ \description{ The \code{lavInspect()} and \code{lavTech()} functions can be used to inspect/extract information that is stored inside (or can be computed from) a -fitted lavaan object. Note: the (older) S4 \code{inspect()} method is now a -shortcut for \code{lavInspect()} with default arguments. +fitted lavaan object. Note: the (older) \code{inspect()} function is +now simply a shortcut for \code{lavInspect()} with default arguments. } \usage{ -lavInspect(lavobject, what = "free", add.labels = TRUE, add.class = TRUE, +lavInspect(object, what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) -lavTech(lavobject, what = "free", add.labels = FALSE, add.class = FALSE, +lavTech(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) + +inspect(object, what = "free", ...) } \arguments{ -\item{lavobject}{An object of class \code{\linkS4class{lavaan}}.} +\item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{what}{Character. What needs to be inspected/extracted? See Details for a full list. Note: the \code{what} argument is not case-sensitive (everything is converted to lower case.)} @@ -37,6 +39,7 @@ a list, where each element corresponds to a group (even if there is only a single group.) If \code{TRUE}, the list will be unlisted if there is only a single group.} +\item{...}{Additional arguments. Not used by lavaan, but by other packages.} } \details{ The \code{lavInspect()} and \code{lavTech()} functions only differ in the way @@ -107,6 +110,9 @@ the data.frame (if any).} \item{\code{"ngroups"}:}{Integer. The number of groups.} \item{\code{"group.label"}:}{A character vector. The group labels.} + \item{\code{"cluster"}:}{A character vector. The cluster variable(s) + in the data.frame (if any).} + \item{\code{"ordered"}:}{A character vector. The ordered variables.} \item{\code{"nobs"}:}{Integer vector. The number of observations in each group that were used in the analysis.} \item{\code{"norig"}:}{Integer vector. The original number of observations @@ -300,6 +306,8 @@ admissible. A warning is raised if negative variances are found, or if either \code{lavInspect(fit, "cov.lv")} or \code{lavInspect(fit, "theta")} return a non-positive definite matrix.} + \item{\code{"zero.cell.tables"}:}{List. 
List of bivariate frequency tables + where at least one cell is empty.} } } diff -Nru r-cran-lavaan-0.5.22/man/lavListInspect.Rd r-cran-lavaan-0.5.23.1097/man/lavListInspect.Rd --- r-cran-lavaan-0.5.22/man/lavListInspect.Rd 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavListInspect.Rd 2017-01-30 16:25:13.000000000 +0000 @@ -0,0 +1,128 @@ +\name{lavListInspect} +\alias{lavListInspect} +\alias{lavListTech} +\title{Inspect or extract information from a lavaanList object} +\description{ +The \code{lavListInspect()} and \code{lavListTech()} functions can be used to +inspect/extract information that is stored inside (or can be computed from) a +lavaanList object. +} +\usage{ +lavListInspect(object, what = "free", add.labels = TRUE, + add.class = TRUE, list.by.group = TRUE, + drop.list.single.group = TRUE) + +lavListTech(object, what = "free", add.labels = FALSE, + add.class = FALSE, list.by.group = FALSE, + drop.list.single.group = FALSE) +} +\arguments{ +\item{object}{An object of class \code{\linkS4class{lavaanList}}.} +\item{what}{Character. What needs to be inspected/extracted? See Details for a +full list. Note: the \code{what} argument is not case-sensitive +(everything is converted to lower case.)} +\item{add.labels}{If \code{TRUE}, variable names are added to the vectors +and/or matrices.} +\item{add.class}{If \code{TRUE}, vectors are given the `lavaan.vector' class; +matrices are given the `lavaan.matrix' class, and symmetric matrices are +given the `lavaan.matrix.symmetric' class. This only affects the way they +are printed on the screen.} +\item{list.by.group}{Logical. Only used when the output consists of model matrices. +If \code{TRUE}, the model matrices are nested within groups. If \code{FALSE}, +a flattened list is returned containing all model matrices, with repeated +names for multiple groups.} +\item{drop.list.single.group}{If \code{FALSE}, the results are returned as + a list, where each element corresponds to a group (even if there is only + a single group.) If \code{TRUE}, the list will be unlisted if there is + only a single group.} +} +\details{ +The \code{lavListInspect()} and \code{lavListTech()} functions only differ in +the way they return the results. The \code{lavListInspect()} function will +prettify the output by default, while the \code{lavListTech()} function will +not attempt to prettify the output. + +Below is a list of possible values for the \code{what} argument, organized +in several sections: + +Model matrices: + +\describe{ + \item{\code{"free"}:}{A list of model matrices. The non-zero integers + represent the free parameters. The numbers themselves correspond + to the position of the free parameter in the parameter vector. + This determines the order of the model parameters in the output + of, for example, \code{coef()} and \code{vcov()}.} + \item{\code{"partable"}:}{A list of model matrices. The non-zero integers + represent both the fixed parameters (for example, factor loadings + fixed at 1.0), and the free parameters if we ignore any equality + constraints. They correspond with all entries (fixed or free) + in the parameter table. See \code{\link{parTable}}.} + \item{\code{"start"}:}{A list of model matrices. The values represent + the starting values for all model parameters. + Alias: \code{"starting.values"}.} +} + +Information about the data (including missing patterns): + +\describe{ + \item{\code{"group"}:}{A character string. The group variable in + the data.frame (if any).} + \item{\code{"ngroups"}:}{Integer.
The number of groups.} + \item{\code{"group.label"}:}{A character vector. The group labels.} + \item{\code{"cluster"}:}{A character vector. The cluster variable(s) + in the data.frame (if any).} + \item{\code{"ordered"}:}{A character vector. The ordered variables.} + \item{\code{"nobs"}:}{Integer vector. The number of observations + in each group that were used in the analysis (in each dataset).} + \item{\code{"norig"}:}{Integer vector. The original number of observations + in each group (in each dataset).} + \item{\code{"ntotal"}:}{Integer. The total number of observations that + were used in the analysis. If there is just a single group, this + is the same as the \code{"nobs"} option; if there are multiple groups, + this is the sum of the \code{"nobs"} numbers for each group + (in each dataset).} +} + +Model features: + +\describe{ + \item{\code{"meanstructure"}:}{Logical. \code{TRUE} if a meanstructure + was included in the model.} + \item{\code{"categorical"}:}{Logical. \code{TRUE} if categorical endogenous + variables were part of the model.} + \item{\code{"fixed.x"}:}{Logical. \code{TRUE} if the exogenous x-covariates + are treated as fixed.} + \item{\code{"parameterization"}:}{Character. Either \code{"delta"} or + \code{"theta"}.} +} + +\describe{ + \item{\code{"list"}:}{The parameter table. The same output as given + by \code{parTable()}.} + \item{\code{"options"}:}{List. The option list.} + \item{\code{"call"}:}{List. The call as returned by match.call, coerced to + a list.} +} + +} +\seealso{ +\code{\link{lavaanList}} +} +\examples{ +# fit model +HS.model <- ' visual =~ x1 + x2 + x3 + textual =~ x4 + x5 + x6 + speed =~ x7 + x8 + x9 ' + +# a data generating function +generateData <- function() simulateData(HS.model, sample.nobs = 100) + +set.seed(1234) +fit <- semList(HS.model, dataFunction = generateData, ndat = 5, + store.slots = "partable") + +# extract information +lavListInspect(fit, "free") +lavListTech(fit, "free") +} diff -Nru r-cran-lavaan-0.5.22/man/lav_model.Rd r-cran-lavaan-0.5.23.1097/man/lav_model.Rd --- r-cran-lavaan-0.5.22/man/lav_model.Rd 2015-09-25 15:25:41.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lav_model.Rd 2017-01-25 20:21:21.000000000 +0000 @@ -7,7 +7,7 @@ \description{Utility functions related to internal model representation (lavmodel)} \usage{ # set/get free parameters -lav_model_set_parameters(lavmodel, x = NULL, estimator = "ML") +lav_model_set_parameters(lavmodel, x = NULL) lav_model_get_parameters(lavmodel, GLIST = NULL, type = "free", extra = TRUE) @@ -20,8 +20,6 @@ \arguments{ \item{lavmodel}{An internal representation of a lavaan model.} \item{x}{Numeric.}{A vector containing the values of all the free model parameters.} -\item{estimator}{Character string.}{The estimator we should assume has been -used.} \item{GLIST}{List. A list of model matrices, similar to the output of \code{lavInspect(object, "est")}.} \item{type}{Character string. If \code{"free"}, only return the free model diff -Nru r-cran-lavaan-0.5.22/man/lavNames.Rd r-cran-lavaan-0.5.23.1097/man/lavNames.Rd --- r-cran-lavaan-0.5.22/man/lavNames.Rd 2016-07-19 20:41:59.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavNames.Rd 2017-02-20 08:22:49.000000000 +0000 @@ -5,22 +5,22 @@ \description{ Extract variables names from a fitted lavaan object.} \usage{ -lavNames(object, type = "ov", group = NULL) +lavNames(object, type = "ov", ...) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{type}{Character. 
The type of variables whose names should be extracted. See details for a complete list.} -\item{group}{If \code{NULL}, -all groups (if any) are used. If an integer (vector), only names from those -groups are extracted. The group numbers are found in the \code{group} -column of the parameter table.} +\item{...}{Additional selection variables. For example \code{"group = 2L"} +(in a multiple-group analysis) only considers the variables included +in the model for the second group.} } \details{ The order of the variable names, as returned by \code{lavNames} determines the order in which the variables are listed in the parameter table, and therefore also in the summary output. + The following variable types are available: \itemize{ \item \code{"ov"}: observed variables diff -Nru r-cran-lavaan-0.5.22/man/lavOptions.Rd r-cran-lavaan-0.5.23.1097/man/lavOptions.Rd --- r-cran-lavaan-0.5.22/man/lavOptions.Rd 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lavOptions.Rd 2017-01-28 12:39:12.000000000 +0000 @@ -0,0 +1,321 @@ +\name{lavOptions} +\alias{lavOptions} +\alias{lavoptions} +\title{lavaan Options} +\description{ +Show the default options used by the \code{lavaan()} function. The +options can be changed by passing 'name = value' arguments to the +\code{lavaan()} function call, where they will be added to the '...' +argument. +} +\usage{ +lavOptions(x = NULL, default = NULL, mimic = "lavaan") +} +\arguments{ +\item{x}{Character. A character string holding an option name, or a character +string vector holding multiple option names. All option names are converted to +lower case.} +\item{default}{If a single option is specified but not available, this value +is returned.} +\item{mimic}{Character. Adjust the option list for this mimic flag.} +} +\details{ +This is the full list of options that are accepted by the \code{lavaan()} +function, organized in several sections: + + +Model features (always available): + +\describe{ + \item{\code{meanstructure}:}{If \code{TRUE}, the means of the observed + variables enter the model. If \code{"default"}, the value is set based + on the user-specified model, and/or the values of other arguments.} + \item{\code{int.ov.free}:}{If \code{FALSE}, the intercepts of the + observed variables are fixed to zero.} + \item{\code{int.lv.free}:}{If \code{FALSE}, the intercepts of the latent + variables are fixed to zero.} + \item{\code{conditional.x}:}{If \code{TRUE}, we set up the model + conditional on the exogenous `x' covariates; the model-implied sample + statistics only include the non-x variables. If \code{FALSE}, the + exogenous `x' variables are modeled jointly with the other variables, and + the model-implied statistics reflect both sets of variables. If + \code{"default"}, the value is set depending on the estimator, and + whether or not the model involves categorical endogenous variables.} + \item{\code{fixed.x}:}{If \code{TRUE}, the exogenous `x' covariates are + considered fixed variables and the means, variances and covariances of + these variables are fixed to their sample values. If \code{FALSE}, they + are considered random, and the means, variances and covariances are free + parameters. If \code{"default"}, the value is set depending on the mimic + option.} + \item{\code{orthogonal}:}{If \code{TRUE}, the exogenous latent variables + are assumed to be uncorrelated.} + \item{\code{std.lv}:}{If \code{TRUE}, the metric of each latent variable + is determined by fixing their (residual) variances to 1.0. If
If + \code{FALSE}, the metric of each latent variable is determined by fixing + the factor loading of the first indicator to 1.0.} + \item{\code{parameterization}:}{Currently only used if data is + categorical. If \code{"delta"}, the delta parameterization is used. + If \code{"theta"}, the theta parameterization is used.} +} + +Model features (only available for the \code{lavaan()} function): + +\describe{ + \item{\code{auto.fix.first}:}{If \code{TRUE}, the factor loading of the + first indicator is set to 1.0 for every latent variable.} + \item{\code{auto.fix.single}:}{If \code{TRUE}, the residual variance (if + included) of an observed indicator is set to zero if it is the only + indicator of a latent variable.} + \item{\code{auto.var}:}{If \code{TRUE}, the residual variances and the + variances of exogenous latent variables are included in the model and + set free.} + \item{\code{auto.cov.lv.x}:}{If \code{TRUE}, the covariances of exogenous + latent variables are included in the model and set free.} + \item{\code{auto.cov.y}:}{If \code{TRUE}, the covariances of dependent + variables (both observed and latent) are included in the model and set + free.} + \item{\code{auto.th}:}{If \code{TRUE}, thresholds for limited dependent + variables are included in the model and set free.} + \item{\code{auto.delta}:}{If \code{TRUE}, response scaling parameters + for limited dependent variables are included in the model and set free.} +} + +Data options: + +\describe{ + \item{\code{std.ov}:}{If \code{TRUE}, all observed variables are + standardized before entering the analysis.} + \item{\code{missing}:}{If \code{"listwise"}, cases with missing values + are removed listwise from the data frame before analysis. If + \code{direct} or \code{"ml"} or \code{"fiml"} and the estimator is + maximum likelihood, Full Information Maximum Likelihood (FIML) + estimation is used using all available data in the data frame. This is + only valid if the data are missing completely at random (MCAR) or + missing at random (MAR). If \code{"default"}, the value is set depending + on the estimator and the mimic option.} +} + +Data summary options: + +\describe{ + \item{\code{sample.cov.rescale}:}{If \code{TRUE}, the sample covariance + matrix provided by the user is internally rescaled by multiplying it + with a factor (N-1)/N. If \code{"default"}, the value is set depending + on the estimator and the likelihood option: it is set to \code{TRUE} if + maximum likelihood estimation is used and \code{likelihood="normal"}, + and \code{FALSE} otherwise.} + \item{\code{ridge}:}{Numeric. Small constant used for ridging. Only used + if the sample covariance matrix is non positive definite.} +} + +Multiple group options: + +\describe{ + \item{\code{group.label}:}{A character vector. The user can specify + which group (or factor) levels need to be selected from the grouping + variable, and in which order. If missing, all grouping levels are + selected, in the order as they appear in the data.} + \item{\code{group.equal}:}{A vector of character strings. Only used in + a multiple group analysis. 
Can be one or more of the following: + \code{"loadings"}, \code{"intercepts"}, \code{"means"}, + \code{"thresholds"}, \code{"regressions"}, \code{"residuals"}, + \code{"residual.covariances"}, \code{"lv.variances"} or + \code{"lv.covariances"}, specifying the pattern of equality + constraints across multiple groups.} + \item{\code{group.partial}:}{A vector of character strings containing + the labels of the parameters which should be free in all groups (thereby + overriding the group.equal argument for some specific parameters).} + \item{\code{group.w.free}:}{Logical. If \code{TRUE}, the group + frequencies are considered to be free parameters in the model. In this + case, a Poisson model is fitted to estimate the group frequencies. If + \code{FALSE} (the default), the group frequencies are fixed to their + observed values.} +} + +Estimation options: + +\describe{ + \item{\code{estimator}:}{The estimator to be used. Can be one of the + following: \code{"ML"} for maximum likelihood, \code{"GLS"} for + generalized least squares, \code{"WLS"} for weighted least squares + (sometimes called ADF estimation), \code{"ULS"} for unweighted least + squares and \code{"DWLS"} for diagonally weighted least squares. These + are the main options that affect the estimation. For convenience, the + \code{"ML"} option can be extended as \code{"MLM"}, \code{"MLMV"}, + \code{"MLMVS"}, \code{"MLF"}, and \code{"MLR"}. + The estimation will still be plain \code{"ML"}, but now + with robust standard errors and a robust (scaled) test statistic. For + \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, classic robust standard + errors are used (\code{se="robust.sem"}); for \code{"MLF"}, standard + errors are based on first-order derivatives (\code{se="first.order"}); + for \code{"MLR"}, `Huber-White' robust standard errors are used + (\code{se="robust.huber.white"}). In addition, \code{"MLM"} will compute + a Satorra-Bentler scaled (mean adjusted) test statistic + (\code{test="satorra.bentler"}), \code{"MLMVS"} will compute a + mean and variance adjusted test statistic (Satterthwaite style) + (\code{test="mean.var.adjusted"}), \code{"MLMV"} will compute a mean + and variance adjusted test statistic (scaled and shifted) + (\code{test="scaled.shifted"}), and \code{"MLR"} will + compute a test statistic which is asymptotically + equivalent to the Yuan-Bentler T2-star test statistic. Analogously, + the estimators \code{"WLSM"} and \code{"WLSMV"} imply the \code{"DWLS"} + estimator (not the \code{"WLS"} estimator) with robust standard errors + and a mean or mean and variance adjusted test statistic. Estimators + \code{"ULSM"} and \code{"ULSMV"} imply the \code{"ULS"} + estimator with robust standard errors + and a mean or mean and variance adjusted test statistic.} + \item{\code{likelihood}:}{Only relevant for ML estimation. If + \code{"wishart"}, the wishart likelihood approach is used. In this + approach, the covariance matrix has been divided by N-1, and both + standard errors and test statistics are based on N-1. + If \code{"normal"}, the normal likelihood approach is used. Here, + the covariance matrix has been divided by N, and both standard errors + and test statistics are based on N. If \code{"default"}, it depends + on the mimic option: if \code{mimic="lavaan"} or \code{mimic="Mplus"}, + normal likelihood is used; otherwise, wishart likelihood is used.} + \item{\code{link}:}{Currently only used if estimator is MML. + If \code{"logit"}, a logit link is used for binary and ordered observed + variables.
If \code{"probit"}, a probit link is used. If + \code{"default"}, it is currently set to \code{"probit"} (but this may + change).} + \item{\code{information}:}{If \code{"expected"}, the expected + information matrix is used (to compute the standard errors). If + \code{"observed"}, the observed information matrix is used. If + \code{"default"}, the value is set depending on the estimator and the + mimic option.} + \item{\code{se}:}{If \code{"standard"}, conventional standard errors + are computed based on inverting the (expected or observed) information + matrix. If \code{"first.order"}, standard errors are computed based on + first-order derivatives. If \code{"robust.sem"}, conventional robust + standard errors are computed. If \code{"robust.huber.white"}, + standard errors are computed based on the `mlr' (aka pseudo ML, + Huber-White) approach. + If \code{"robust"}, either \code{"robust.sem"} or + \code{"robust.huber.white"} is used depending on the estimator, + the mimic option, and whether the data are complete or not. + If \code{"boot"} or \code{"bootstrap"}, bootstrap standard errors are + computed using standard bootstrapping (unless Bollen-Stine bootstrapping + is requested for the test statistic; in this case bootstrap standard + errors are computed using model-based bootstrapping). + If \code{"none"}, no standard errors are computed.} + \item{\code{test}:}{If \code{"standard"}, a conventional chi-square test + is computed. If \code{"Satorra.Bentler"}, a Satorra-Bentler scaled test + statistic is computed. If \code{"Yuan.Bentler"}, a Yuan-Bentler scaled + test statistic is computed. If \code{"mean.var.adjusted"} or + \code{"Satterthwaite"}, a mean and variance adjusted test statistic is + computed. If \code{"scaled.shifted"}, an alternative mean and variance + adjusted test statistic is computed (as in Mplus version 6 or higher). + If \code{"boot"} or \code{"bootstrap"} or \code{"Bollen.Stine"}, the + Bollen-Stine bootstrap is used to compute the bootstrap probability value + of the test statistic. If \code{"default"}, the value depends on the + values of other arguments.} + \item{\code{bootstrap}:}{Number of bootstrap draws, if bootstrapping is + used.} + \item{\code{do.fit}:}{If \code{FALSE}, the model is not fit, and the + current starting values of the model parameters are preserved.} +} + +Optimization options: + +\describe{ + \item{\code{control}:}{A list containing control parameters passed to + the external optimizer. By default, lavaan uses \code{"nlminb"}. + See the manpage of \code{\link{nlminb}} for an overview of the control + parameters. If another (external) optimizer is selected, see the + manpage for that optimizer to see the possible control parameters.} + \item{\code{optim.method}:}{Character. The optimizer that should be + used. For unconstrained optimization (the model syntax + does not include any "==", ">" or "<" operators), + the available options are \code{"nlminb"} (the default), \code{"BFGS"} and + \code{"L-BFGS-B"}. For constrained + optimization, the only available option is \code{"nlminb.constr"}.} +} + +Categorical estimation options: + +\describe{ + \item{\code{zero.add}:}{A numeric vector containing two values. These + values affect the calculation of polychoric correlations when some + frequencies in the bivariate table are zero. The first value only + applies for 2x2 tables. The second value for larger tables. This value + is added to the zero frequency in the bivariate table.
If + \code{"default"}, the value is set depending on the \code{"mimic"} + option. By default, lavaan uses \code{zero.add = c(0.5, 0.0)}.} + \item{\code{zero.keep.margins}:}{Logical. This argument only affects + the computation of polychoric correlations for 2x2 tables with an empty + cell, and where a value is added to the empty cell. If \code{TRUE}, the + other values of the frequency table are adjusted so that all margins are + unaffected. If \code{"default"}, the value is set depending on the + \code{"mimic"} option. The default is \code{TRUE}.} + \item{\code{zero.cell.warn}:}{Logical. Only used if some observed + endogenous variables are categorical. If \code{TRUE}, give a warning if + one or more cells of a bivariate frequency table are empty.} +} + +Starting values options: + +\describe{ + \item{\code{start}:}{If it is a character string, the two options are + currently \code{"simple"} and \code{"Mplus"}. In the first case, all + parameter values are set to zero, except the factor loadings (set to + one), the variances of latent variables (set to 0.05), and the residual + variances of observed variables (set to half the observed variance). + If \code{"Mplus"}, we use a similar scheme, but the factor loadings are + estimated using the fabin3 estimator (tsls) per factor. + If \code{start} is a fitted object of class \code{\linkS4class{lavaan}}, + the estimated values of the corresponding parameters will be extracted. + If it is a model list, for example the output of the + \code{parameterEstimates()} function, the values of the \code{est} or + \code{start} or \code{ustart} column (whichever is found first) will be + extracted.} +} + +Check options: + +\describe{ + \item{\code{check}:}{Character vector. If \code{check} includes + \code{"start"}, the starting values are checked for possibly + inconsistent values (for example values implying correlations larger + than one); if \code{check} includes \code{"post"}, a check is performed + after (post) fitting, to check if the solution is admissible.} +} + +Verbosity options: + +\describe{ + \item{\code{verbose}:}{If \code{TRUE}, the function value is printed out + during each iteration.} + \item{\code{warn}:}{If \code{TRUE}, some (possibly harmless) warnings + are printed out during the iterations.} + \item{\code{debug}:}{If \code{TRUE}, debugging information is printed + out.} +} + +Miscellaneous: + +\describe{ + \item{\code{model.type}:}{Set the model type: possible values + are \code{"cfa"}, \code{"sem"} or \code{"growth"}. This may affect + how starting values are computed, and may be used to alter the terminology + used in the summary output, or the layout of path diagrams that are + based on a fitted lavaan object.} + \item{\code{mimic}:}{If \code{"Mplus"}, an attempt is made to mimic the + Mplus program. If \code{"EQS"}, an attempt is made to mimic the EQS + program. If \code{"default"}, the value is (currently) set to + \code{"lavaan"}, which is very close to \code{"Mplus"}.} + \item{\code{representation}:}{If \code{"LISREL"}, the classical LISREL + matrix representation is used to represent the model (using the all-y + variant).
No other options are available (for now).} +} + +} + +\seealso{ +\code{\link{lavaan}} +} + +\examples{ +lavOptions() +lavOptions("std.lv") +lavOptions(c("std.lv", "orthogonal")) +} diff -Nru r-cran-lavaan-0.5.22/man/lav_partable.Rd r-cran-lavaan-0.5.23.1097/man/lav_partable.Rd --- r-cran-lavaan-0.5.22/man/lav_partable.Rd 2016-04-26 19:09:06.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/lav_partable.Rd 2017-02-20 16:36:27.000000000 +0000 @@ -13,8 +13,8 @@ \description{Utility functions related to the parameter table (partable)} \usage{ # extract information from a parameter table -lav_partable_df(partable, group = NULL) -lav_partable_ndat(partable, group = NULL) +lav_partable_df(partable) +lav_partable_ndat(partable) lav_partable_npar(partable) lav_partable_attributes(partable, pta = NULL) @@ -43,7 +43,6 @@ \arguments{ \item{partable}{A parameter table. see \code{\link{lavParTable}} for more information.} -\item{group}{Integer. If non-null, only consider this group.} \item{blocks}{Character vector. Which columns in the parameter table should be taken to distinguish between different blocks/groups of parameters (and hence be given different labels)?} diff -Nru r-cran-lavaan-0.5.22/man/mplus2lavaan.modelSyntax.Rd r-cran-lavaan-0.5.23.1097/man/mplus2lavaan.modelSyntax.Rd --- r-cran-lavaan-0.5.22/man/mplus2lavaan.modelSyntax.Rd 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/mplus2lavaan.modelSyntax.Rd 2016-11-09 14:35:48.000000000 +0000 @@ -0,0 +1,33 @@ +\name{mplus2lavaan.modelSyntax} +\alias{mplus2lavaan.modelSyntax} +\title{Convert Mplus model syntax to lavaan} +\description{ +Converts Mplus model syntax into lavaan model syntax.} +\usage{ +mplus2lavaan.modelSyntax(syntax) +} +\arguments{ +\item{syntax}{A character vector containing Mplus model syntax to be + converted to lavaan model syntax. Note that parsing Mplus syntax often + requires correct usage of newline characters. If \code{syntax} is a + vector of multiple strings, these will be joined with newlines prior + to conversion. Alternatively, \code{\\n} characters can be included + inline in \code{syntax}.} +} +\value{ +A character string of converted \code{lavaan} model syntax. +} +\author{Michael Hallquist} +\seealso{\code{\link{mplus2lavaan}}.} +\examples{ +\dontrun{ +syntax <- ' + f1 BY x1*1 x2 x3; + x1 WITH x2; + x3 (1); + x2 (1); +' +lavSyntax <- mplus2lavaan.modelSyntax(syntax) +cat(lavSyntax) +} +} diff -Nru r-cran-lavaan-0.5.22/man/mplus2lavaan.Rd r-cran-lavaan-0.5.23.1097/man/mplus2lavaan.Rd --- r-cran-lavaan-0.5.22/man/mplus2lavaan.Rd 2013-05-08 13:05:16.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/mplus2lavaan.Rd 2016-12-30 17:17:07.000000000 +0000 @@ -6,23 +6,26 @@ Read in an Mplus input file, convert it to lavaan syntax, and fit the model.} \usage{ -mplus2lavaan(inpfile) +mplus2lavaan(inpfile, run = TRUE) } \arguments{ \item{inpfile}{The filename (including a full path) of the Mplus input file. The data (as referred to in the Mplus input file) should be in the same directory as the Mplus input file.} +\item{run}{Whether to run the specified Mplus input syntax (\code{TRUE}) or +only to parse and convert the syntax (\code{FALSE}).} } \value{ -A list with two elements: \code{mplus.inp} contains the input data, a title, -the variable names, and the converted (lavaan) model syntax; \code{lav.out} -contains the fitted lavaan object. +A \code{lavaan} object with the fitted results of the Mplus model. 
The parsed +and converted Mplus syntax is preserved in the \code{@external} slot of the \code{lavaan} +object in the \code{$mplus.inp} element. If \code{run} is \code{FALSE}, a \code{list} of converted +syntax is returned. } \author{Michael Hallquist} \seealso{\code{\link{lavExport}}.} \examples{ \dontrun{ out <- mplus2lavaan("ex5.1.inp") -summary(out$lav.out) +summary(out) } } diff -Nru r-cran-lavaan-0.5.22/man/sem.Rd r-cran-lavaan-0.5.23.1097/man/sem.Rd --- r-cran-lavaan-0.5.22/man/sem.Rd 2016-09-03 09:25:01.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/sem.Rd 2017-01-29 10:23:31.000000000 +0000 @@ -4,25 +4,11 @@ \description{ Fit a Structural Equation Model (SEM).} \usage{ -sem(model = NULL, data = NULL, - meanstructure = "default", - conditional.x = "default", fixed.x = "default", - orthogonal = FALSE, std.lv = FALSE, - parameterization = "default", std.ov = FALSE, - missing = "default", ordered = NULL, - sample.cov = NULL, sample.cov.rescale = "default", - sample.mean = NULL, sample.nobs = NULL, - ridge = 1e-05, group = NULL, - group.label = NULL, group.equal = "", group.partial = "", - group.w.free = FALSE, cluster = NULL, constraints = '', - estimator = "default", likelihood = "default", link = "default", - information = "default", se = "default", test = "default", - bootstrap = 1000L, mimic = "default", representation = "default", - do.fit = TRUE, control = list(), WLS.V = NULL, NACOV = NULL, - zero.add = "default", zero.keep.margins = "default", - zero.cell.warn = TRUE, start = "default", - check = c("start", "post"), - verbose = FALSE, warn = TRUE, debug = FALSE) +sem(model = NULL, data = NULL, ordered = NULL, + sample.cov = NULL, sample.mean = NULL, sample.nobs = NULL, + group = NULL, cluster = NULL, + constraints = "", WLS.V = NULL, NACOV = NULL, + ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model @@ -33,41 +19,6 @@ \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} -\item{meanstructure}{If \code{TRUE}, the means of the observed - variables enter the model. If \code{"default"}, the value is set based - on the user-specified model, and/or the values of other arguments.} -\item{conditional.x}{If \code{TRUE}, we set up the model conditional on - the exogenous `x' covariates; the model-implied sample statistics - only include the non-x variables. If \code{FALSE}, the exogenous `x' - variables are modeled jointly with the other variables, and the - model-implied statistics refect both sets of variables. If - \code{"default"}, the value is set depending on the estimator, and - whether or not the model involves categorical endogenous variables.} -\item{fixed.x}{If \code{TRUE}, the exogenous `x' covariates are considered - fixed variables and the means, variances and covariances of these variables - are fixed to their sample values. If \code{FALSE}, they are considered - random, and the means, variances and covariances are free parameters. If - \code{"default"}, the value is set depending on the mimic option.} -\item{orthogonal}{If \code{TRUE}, the exogenous latent variables - are assumed to be uncorrelated.} -\item{std.lv}{If \code{TRUE}, the metric of each latent variable is - determined by fixing their (residual) - variances to 1.0. 
If \code{FALSE}, the metric - of each latent variable is determined by fixing the factor loading of the - first indicator to 1.0.} -\item{parameterization}{Currently only used if data is categorical. If - \code{"delta"}, the delta parameterization is used. If \code{"theta"}, - the theta parameterization is used.} -\item{std.ov}{If \code{TRUE}, all observed variables are standardized - before entering the analysis.} -\item{missing}{If \code{"listwise"}, cases with missing values are removed - listwise from the data frame before analysis. If \code{"direct"} or - \code{"ml"} or \code{"fiml"} and the estimator is maximum likelihood, - Full Information Maximum Likelihood (FIML) estimation is used using all - available data in the data frame. This is only valid if the data are - missing completely at random (MCAR) or missing at random (MAR). If - \code{"default"}, the value is set depending on the estimator and the - mimic option.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated @@ -80,133 +31,16 @@ internally rescaled by multiplying it with a factor (N-1)/N, to ensure that the covariance matrix has been divided by N. This can be turned off by setting the \code{sample.cov.rescale} argument to \code{FALSE}.} -\item{sample.cov.rescale}{If \code{TRUE}, the sample covariance matrix provided - by the user is internally rescaled by multiplying it with a factor (N-1)/N. - If \code{"default"}, the value is set depending on the estimator and the - likelihood option: it is set to \code{TRUE} if maximum likelihood - estimation is used and \code{likelihood="normal"}, and \code{FALSE} - otherwise.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} -\item{ridge}{Numeric. Small constant used for ridging. Only used if the sample covariance matrix is non positive definite.} \item{group}{A variable name in the data frame defining the groups in a multiple group analysis.} -\item{group.label}{A character vector. The user can specify which group (or -factor) levels need to be selected from the grouping variable, and in which -order. If \code{NULL} (the default), all grouping levels are selected, in the -order as they appear in the data.} -\item{group.equal}{A vector of character strings. Only used in - a multiple group analysis. Can be one or more of the following: - \code{"loadings"}, \code{"intercepts"}, \code{"means"}, \code{"thresholds"}, - \code{"regressions"}, \code{"residuals"}, - \code{"residual.covariances"}, \code{"lv.variances"} or - \code{"lv.covariances"}, specifying the pattern of equality - constraints across multiple groups.} -\item{group.partial}{A vector of character strings containing the labels - of the parameters which should be free in all groups (thereby - overriding the group.equal argument for some specific parameters).} -\item{group.w.free}{Logical. If \code{TRUE}, the group frequencies are - considered to be free parameters in the model. In this case, a - Poisson model is fitted to estimate the group frequencies. 
If - \code{FALSE} (the default), the group frequencies are fixed to their - observed values.} \item{cluster}{Not used yet.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} -\item{estimator}{The estimator to be used. Can be one of the following: - \code{"ML"} for maximum likelihood, \code{"GLS"} for generalized least - squares, \code{"WLS"} for weighted least squares (sometimes called ADF - estimation), \code{"ULS"} for unweighted least squares and \code{"DWLS"} for - diagonally weighted least squares. These are the main options that affect - the estimation. For convenience, the \code{"ML"} option can be extended - as \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, \code{"MLF"}, and - \code{"MLR"}. The estimation will still be plain \code{"ML"}, but now - with robust standard errors and a robust (scaled) test statistic. For - \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, classic robust standard - errors are used (\code{se="robust.sem"}); for \code{"MLF"}, standard - errors are based on first-order derivatives (\code{se="first.order"}); - for \code{"MLR"}, `Huber-White' robust standard errors are used - (\code{se="robust.huber.white"}). In addition, \code{"MLM"} will compute - a Satorra-Bentler scaled (mean adjusted) test statistic - (\code{test="satorra.bentler"}) , \code{"MLMVS"} will compute a - mean and variance adjusted test statistic (Satterthwaite style) - (\code{test="mean.var.adjusted"}), \code{"MLMV"} will compute a mean - and variance adjusted test statistic (scaled and shifted) - (\code{test="scaled.shifted"}), and \code{"MLR"} will - compute a test statistic which is asymptotically - equivalent to the Yuan-Bentler T2-star test statistic. Analogously, - the estimators \code{"WLSM"} and \code{"WLSMV"} imply the \code{"DWLS"} - estimator (not the \code{"WLS"} estimator) with robust standard errors - and a mean or mean and variance adjusted test statistic. Estimators - \code{"ULSM"} and \code{"ULSMV"} imply the \code{"ULS"} - estimator with robust standard errors - and a mean or mean and variance adjusted test statistic.} -\item{likelihood}{Only relevant for ML estimation. If \code{"wishart"}, - the wishart likelihood approach is used. In this approach, the covariance - matrix has been divided by N-1, and both standard errors and test - statistics are based on N-1. - If \code{"normal"}, the normal likelihood approach is used. Here, - the covariance matrix has been divided by N, and both standard errors - and test statistics are based on N. If \code{"default"}, it depends - on the mimic option: if \code{mimic="lavaan"} or \code{mimic="Mplus"}, - normal likelihood is used; otherwise, wishart likelihood is used.} -\item{link}{Currently only used if estimator is MML. If \code{"logit"}, - a logit link is used for binary and ordered observed variables. - If \code{"probit"}, a probit link is used. If \code{"default"}, - it is currently set to \code{"probit"} (but this may change).} -\item{information}{If \code{"expected"}, the expected information matrix - is used (to compute the standard errors). If \code{"observed"}, the - observed information matrix is used. If \code{"default"}, the value is - set depending on the estimator and the mimic option.} -\item{se}{If \code{"standard"}, conventional standard errors - are computed based on inverting the (expected or observed) information - matrix. If \code{"first.order"}, standard errors are computed based on - first-order derivatives. 
If \code{"robust.sem"}, conventional robust - standard errors are computed. If \code{"robust.huber.white"}, - standard errors are computed based on the `mlr' (aka pseudo ML, - Huber-White) approach. - If \code{"robust"}, either \code{"robust.sem"} or - \code{"robust.huber.white"} is used depending on the estimator, - the mimic option, and whether the data are complete or not. - If \code{"boot"} or \code{"bootstrap"}, bootstrap standard errors are - computed using standard bootstrapping (unless Bollen-Stine bootstrapping - is requested for the test statistic; in this case bootstrap standard - errors are computed using model-based bootstrapping). - If \code{"none"}, no standard errors are computed.} -\item{test}{If \code{"standard"}, a conventional chi-square test is computed. - If \code{"Satorra.Bentler"}, a Satorra-Bentler scaled test statistic is - computed. If \code{"Yuan.Bentler"}, a Yuan-Bentler scaled test statistic - is computed. If \code{"mean.var.adjusted"} or \code{"Satterthwaite"}, a - mean and variance adjusted test statistic is compute. - If \code{"scaled.shifted"}, an alternative mean and variance adjusted test - statistic is computed (as in Mplus version 6 or higher). - If \code{"boot"} or \code{"bootstrap"} or - \code{"Bollen.Stine"}, the Bollen-Stine bootstrap is used to compute - the bootstrap probability value of the test statistic. - If \code{"default"}, the value depends on the - values of other arguments.} -\item{bootstrap}{Number of bootstrap draws, if bootstrapping is used.} -\item{mimic}{If \code{"Mplus"}, an attempt is made to mimic the Mplus - program. If \code{"EQS"}, an attempt is made to mimic the EQS program. - If \code{"default"}, the value is (currently) set to to \code{"lavaan"}, - which is very close to\code{"Mplus"}.} -\item{representation}{If \code{"LISREL"} the classical LISREL matrix - representation is used to represent the model (using the all-y variant).} -\item{do.fit}{If \code{FALSE}, the model is not fit, and the current - starting values of the model parameters are preserved.} -\item{control}{A list containing control parameters passed to the optimizer. - By default, lavaan uses \code{"nlminb"}. See the manpage of - \code{\link{nlminb}} for an overview of the control parameters. - A different optimizer can be chosen by setting the value of - \code{optim.method}. For unconstrained optimization (the model syntax - does not include any "==", ">" or "<" operators), - the available options are \code{"nlminb"} (the default), \code{"BFGS"} and - \code{"L-BFGS-B"}. See the manpage of the \code{\link{optim}} function for - the control parameters of the latter two options. For constrained - optimization, the only available option is \code{"nlminb.constr"}.} \item{WLS.V}{A user provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix @@ -224,51 +58,12 @@ For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} -\item{zero.add}{A numeric vector containing two values. These values affect the - calculation of polychoric correlations when some frequencies in the - bivariate table are zero. - The first value only applies for 2x2 tables. The second value for larger - tables. This value is added to the zero frequency in the bivariate table. 
- If \code{"default"}, the value is set depending on the \code{"mimic"} - option. By default, lavaan uses \code{zero.add = c(0.5. 0.0)}.} -\item{zero.keep.margins}{Logical. This argument only affects the computation - of polychoric correlations for 2x2 tables with an empty cell, and where a - value is added to the empty cell. If \code{TRUE}, the other values of the - frequency table are adjusted so that all margins are unaffected. If - \code{"default"}, the value is set depending on the \code{"mimic"}. The - default is \code{TRUE}.} -\item{zero.cell.warn}{Logical. Only used if some observed endogenous variables - are categorical. If \code{TRUE}, give a warning if one or more cells - of a bivariate frequency table are empty.} -\item{start}{If it is a character string, - the two options are currently \code{"simple"} and \code{"Mplus"}. - In the first - case, all parameter values are set to zero, except the factor loadings - (set to one), the variances of latent variables (set to 0.05), and - the residual variances of observed variables (set to half the observed - variance). - If \code{"Mplus"}, we use a similar scheme, but the factor loadings are - estimated using the fabin3 estimator (tsls) per factor. - If \code{start} is a fitted - object of class \code{\linkS4class{lavaan}}, the estimated values of - the corresponding parameters will be extracted. If it is a model list, - for example the output of the \code{paramaterEstimates()} function, - the values of the \code{est} or \code{start} or \code{ustart} column - (whichever is found first) will be extracted.} -\item{check}{Character vector. If \code{check} includes \code{"start"}, - the starting values are checked for possibly inconsistent values (for - example values implying correlations larger than one); - if \code{check} includes \code{"post"}, a check is performed after - (post) fitting, to check if the solution is admissable.} -\item{verbose}{If \code{TRUE}, the function value is printed out during - each iteration.} -\item{warn}{If \code{TRUE}, some (possibly harmless) warnings are printed - out during the iterations.} -\item{debug}{If \code{TRUE}, debugging information is printed out.} +\item{...}{Many more additional options can be defined, using 'name = value'. 
+ See \code{\link{lavOptions}} for a complete list.} } \details{ The \code{sem} function is a wrapper for the more general - \code{\link{lavaan}} function, using the following default arguments: + \code{\link{lavaan}} function, but setting the following default options: \code{int.ov.free = TRUE}, \code{int.lv.free = FALSE}, \code{auto.fix.first = TRUE} (unless \code{std.lv = TRUE}), \code{auto.fix.single = TRUE}, \code{auto.var = TRUE}, diff -Nru r-cran-lavaan-0.5.22/man/simulateData.Rd r-cran-lavaan-0.5.23.1097/man/simulateData.Rd --- r-cran-lavaan-0.5.22/man/simulateData.Rd 2015-12-16 14:00:51.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/simulateData.Rd 2017-02-21 11:18:53.000000000 +0000 @@ -9,7 +9,7 @@ orthogonal = FALSE, std.lv = TRUE, auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = TRUE, auto.cov.lv.x = TRUE, auto.cov.y = TRUE, ..., sample.nobs = 500L, ov.var = NULL, - group.label = paste("G", 1:ngroups, sep = ""), skewness = NULL, + group.label = paste("G", 1:nblocks, sep = ""), skewness = NULL, kurtosis = NULL, seed = NULL, empirical = FALSE, return.type = "data.frame", return.fit = FALSE, debug = FALSE, standardized = FALSE) diff -Nru r-cran-lavaan-0.5.22/man/standardizedSolution.Rd r-cran-lavaan-0.5.23.1097/man/standardizedSolution.Rd --- r-cran-lavaan-0.5.22/man/standardizedSolution.Rd 2015-09-26 15:08:17.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/man/standardizedSolution.Rd 2017-02-24 12:58:59.000000000 +0000 @@ -7,7 +7,8 @@ \usage{ standardizedSolution(object, type = "std.all", se = TRUE, zstat = TRUE, pvalue = TRUE, remove.eq = TRUE, - remove.ineq = TRUE, remove.def = FALSE) + remove.ineq = TRUE, remove.def = FALSE, + GLIST = NULL, est = NULL) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} @@ -32,6 +33,11 @@ rows containing inequality constraints, if any.} \item{remove.def}{Logical. If TRUE, filter the output by removing all rows containing parameter definitions, if any.} +\item{GLIST}{List of model matrices. If provided, they will be used +instead of the GLIST inside the object@Model slot.} +\item{est}{Numeric. Parameter values (as in the `est' column of a +parameter table). If provided, they will be used instead of +the parameters that can be extracted from object.} } \value{ A data.frame containing standardized model parameters.
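The new \code{est} argument accepts a full vector of parameter values (as in the `est' column of the parameter table). A minimal sketch (the one-factor model below is invented for illustration and uses the HolzingerSwineford1939 data shipped with lavaan; supplying the values of the `est' column explicitly should simply reproduce the default output):

library(lavaan)
HS.model <- ' visual =~ x1 + x2 + x3 '
fit <- cfa(HS.model, data = HolzingerSwineford1939)
# default: standardize the parameter estimates stored in the fitted object
standardizedSolution(fit, type = "std.all")
# the same call, but supplying the parameter values explicitly via `est'
standardizedSolution(fit, type = "std.all", est = parTable(fit)$est)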
diff -Nru r-cran-lavaan-0.5.22/MD5 r-cran-lavaan-0.5.23.1097/MD5 --- r-cran-lavaan-0.5.22/MD5 2016-09-24 16:02:02.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/MD5 2017-02-24 22:28:29.000000000 +0000 @@ -1,118 +1,127 @@ -f83bd1a4b09a929b6ec5b5d0d8e1dc0e *DESCRIPTION -d56e1585967383f886cf56909953c800 *NAMESPACE -8ea8145c11041640691c33f85af9c79c *R/00class.R -26ca78440eba9cc3147779f2f0bbece0 *R/00generic.R -0dd85fb0ff8c9980289678873e5c5e6c *R/01RefClass_00lavModel.R -c18bea80c3163906201dc7e01f6efac2 *R/01RefClass_01lavOptim.R +85cbe9317110b969f43eb18821870a64 *DESCRIPTION +1b6f179a29336540a0a803347c8ea008 *NAMESPACE +2c06e981ca673126bd34646156f9c1c8 *R/00class.R +d02be42658ab06953315c502439d65c6 *R/00generic.R +7dc16c76951b954557a170601dd51415 *R/01RefClass_00lavRefModel.R +efe636d8bc46031f30e74622c0f4567e *R/01RefClass_01lavOptim.R c8818fa21dd499ecdbceda5a339e8d15 *R/01RefClass_02lavML.R db43385c92c53d05d6f2a8d20b7858e8 *R/ctr_estfun.R d6920d8404718f56582ffdff50498771 *R/ctr_informative_testing.R 50747b931a9bfe8c2ee13350bb766718 *R/ctr_modelcov.R -2bb1fc7dd6a7a3051dd2216fbf4fd07d *R/ctr_mplus2lavaan.R +8743ac3ff8d84595ea44d777e482b867 *R/ctr_mplus2lavaan.R 2e4bf3cf21a238f4570930e8cb3e9747 *R/ctr_pairwise_fit.R 4a668097de5d79baef8f841c4908c518 *R/ctr_pairwise_table.R -f30094b9aef61c490a6e02618c2edfa7 *R/ctr_pml_plrt.R -dbf2a598632872e03352a8c38438fd12 *R/ctr_pml_plrt2.R +2980be79922355e9a78f252ebf532958 *R/ctr_pml_doubly_robust_utils.R +96d2dcdbdffad48d2566f8053cdf453b *R/ctr_pml_plrt.R +1cd5c477a8fdf7c01252f67a3a51189e *R/ctr_pml_plrt2.R 37899115aaac5dac12a2b6ce2a68a6a3 *R/ctr_pml_plrt_nested.R 2a06adcd6bb5668afb71b893ff8691ef *R/ctr_pml_utils.R f237c1cc3b7746be5ad12ee67400d958 *R/lav_binorm.R -21f15f6206a0de1f342b89301de4f6fb *R/lav_bootstrap.R +581485ed7e1fe3d65e64ee016754ad42 *R/lav_bootstrap.R 78a3bf33927df957e13fdcb1b21ad7b2 *R/lav_bootstrap_lrt.R acf7fe7756d4c2a9a40e8374ba95a75c *R/lav_constraints.R -87ca95cda815055f5f51e9845b4bd9a1 *R/lav_cor.R -c91323ebe49b5501f3d01f27b2aaa16c *R/lav_data.R +6ed2ae59edd0513bc33489486579e1ec *R/lav_cor.R +1c9d1e953439b5f563ad577a5f68342d *R/lav_data.R 08a44f9b08c49498e396b076bf0766ac *R/lav_dataframe.R f59abdc945b1904dddf8d758064ca052 *R/lav_export.R -e042cdffa9fbeb475597c8205e2d5157 *R/lav_export_bugs.R -ee05d11cb3abfa3a7133f6f2c754d8a4 *R/lav_export_mplus.R +674dd495eece3854c7c37d3a1b7fd1ce *R/lav_export_bugs.R +43f593152b2d73dfc42678fb83173e15 *R/lav_export_mplus.R 3cda47a41dcc8bee79bd7ec952b61fab *R/lav_fabin.R 55a3b989fc4a661c52fd201913d41e92 *R/lav_fiml.R -5b51440e1f3feb0a858e5733f68b83af *R/lav_fit.R -fbf3017e3b77b0490383015ab9ad92f2 *R/lav_fit_measures.R +927c6bdf214002c6e1d957aa1196a1da *R/lav_fit.R +66a27539c225fe6e29a9b3a5c0e43ee9 *R/lav_fit_measures.R +1181145124aa1612919339bc3b41f8f3 *R/lav_fsr_croon.R 1f6e6e42bbc82d84c6ceda3b61c5cfab *R/lav_func_deriv.R f6683b0da25bcfee057bc0d1b2732b9d *R/lav_graphics.R -0e5ba3afae6d1e59362e688ab904920b *R/lav_integrate.R +a04d2242134013d8b110738012999274 *R/lav_integrate.R +6894f2a2baebf698e15bfb0bdb06d91d *R/lav_lavaanList_inspect.R 47e479a777b7f69a49414b1e8532afaa *R/lav_lavaanList_methods.R 2b9193ae7b7bc3a5872214581126d0f0 *R/lav_lavaanList_multipleGroups.R 3608cb1844aabc56699cdf28b74a47ae *R/lav_lavaanList_multipleImputation.R b5fa0e59f276b50a5db71e28fb7bd852 *R/lav_lavaanList_simulate.R -d4eb3271954d03ea72796b9180f5be54 *R/lav_matrix.R -6f453207fb6d593153d3724aabcc6d3a *R/lav_missing.R -a0aee2b0893b726c3076c9da7d1a4153 *R/lav_model.R -e31e42d49ea0778fc0f9002ac8fa9be5 *R/lav_model_compute.R 
-b02f3714ff006892918015cb919501af *R/lav_model_estimate.R -78ce59c7bd5507e3009de530d5655c77 *R/lav_model_gradient.R -e6e136a08a4049094e42fc10875861fe *R/lav_model_gradient_mml.R +6c02af13d17c94163c95f490d2a6502b *R/lav_matrix.R +e9dc97111fa485af0835417e16a3a932 *R/lav_model.R +ce5e150be5da2ab82257c75b1b6c6618 *R/lav_model_compute.R +642d8e64bc125f07ea2bb5366602a89e *R/lav_model_estimate.R +4118d18d3b91c222cd024acf27e02336 *R/lav_model_gradient.R +2e8276f6c234811092b8c974bbf414ff *R/lav_model_gradient_mml.R 66a94bb894225a3eca573b7f21af358b *R/lav_model_gradient_pml.R -c433ea5fb6bd30faa4afb264fac44879 *R/lav_model_hessian.R -de11cbeee36f7482965e3519e85a9e04 *R/lav_model_implied.R -f97369b41f35bcf3a0b3f9b4c9bad35b *R/lav_model_information.R -760a7b846e6c76ce7bfaf3f8f3c34147 *R/lav_model_lik.R -1f800e65708d45c6677fde29e99a4106 *R/lav_model_objective.R -d8d547e84cd820b344c9aa8a285c1102 *R/lav_model_utils.R -8d242f9a64c8ca3b7303d81547508b38 *R/lav_model_vcov.R -263338260d11ba69984fff55d1f23c6b *R/lav_model_wls.R -1aea311c1768c4014660cdae77a153c2 *R/lav_modification.R +61d3d322cc7fdde173d547ee16581964 *R/lav_model_hessian.R +eeb577f2bf89e64fd0b813060adde9ab *R/lav_model_implied.R +c42abb1a90e517543a15be92d8eea3e2 *R/lav_model_information.R +99eff636976f651be125d6fa18c36e21 *R/lav_model_lik.R +1a67a3244a1cbe1f09c7fceba1f52187 *R/lav_model_objective.R +ea82deadd29d3f191e9154f4f55c8e22 *R/lav_model_utils.R +436e23c22cfb243aacc837503ac06ead *R/lav_model_vcov.R +7c78c87ce2b1b628bc22844b71313185 *R/lav_model_wls.R +0bbd69945544e58c1c62c60f55716da1 *R/lav_modification.R a5f097cfa2a38f95e77ad182b12e5736 *R/lav_mplus.R -ba77017915ffe54d47ac28b16ab64b68 *R/lav_muthen1984.R -aac7a75a6bed4817dbe666ccdb69705d *R/lav_mvnorm.R -1cd956292df83c38f750f9e7c90e5b27 *R/lav_mvnorm_h1.R -e0f25435bdd4b5d8a2a12908f8d8297e *R/lav_mvnorm_missing.R -12524a2566daabf5e42ae833b456f21b *R/lav_mvnorm_missing_h1.R -e3bc9d85b5abcc288c941cf70ae2e6e3 *R/lav_mvreg.R +4a8583ac4144633a40c0c1cc9bfeb65e *R/lav_muthen1984.R +eaa2bd1288baf4cf8bc6de11b1d00b6f *R/lav_mvnorm.R +5080539e20975211859becc9f1c5939d *R/lav_mvnorm_h1.R +5b36cd325116e17614702f7c38f9b9e2 *R/lav_mvnorm_missing.R +b83484bb56718b8b07959b2ae1f4d8ee *R/lav_mvnorm_missing_h1.R +6d65915876942b351b1cb3fe2ac59219 *R/lav_mvreg.R e43f0a4520aeb774aa19e95a6aca69bc *R/lav_nlminb_constr.R 5d83a269e546be51e48a808545133f01 *R/lav_norm.R -441e36a15f597f07f8e093c532a7279d *R/lav_object_generate.R -605ab29f62728012368bd9cdcd091b0f *R/lav_object_inspect.R -c8061c1bdf0735b8faf6876dc9d231e8 *R/lav_object_methods.R +3562b9127f2a6e2c7ddf3ca81f7eabdb *R/lav_object_generate.R +f3ea17468cb9580f5f45734ae3e3247c *R/lav_object_inspect.R +eb100380277725a9cfd938574a461503 *R/lav_object_methods.R 3dbae6edf3f95ec4eccc2b2dcf4a5821 *R/lav_object_post_check.R -11c88196fdda4aebcf742aa980a3a5d0 *R/lav_objective.R +4bea7cbb90e9b9681c73c7f35b589016 *R/lav_objective.R 9d1b727a7a04ebc600e4f03811150073 *R/lav_ols.R -0ff166889478a361f509988fa8cd41cc *R/lav_options.R -34fe595bd39a1b4d00e4408db209e2e4 *R/lav_partable.R +104ccc9befb7265a06952eca792d1c2e *R/lav_options.R +ff133e16f8499b7e436b69038f2aa12b *R/lav_partable.R +a0fecf6d3aeb22d937b6a2f0e72e8a9d *R/lav_partable_attributes.R 922cf90400fef61246c15a8bf416f2ee *R/lav_partable_check.R -24cefc8553bc624670b84f0fac1489ff *R/lav_partable_complete.R -58ed208075721dd9ad34284c47f85606 *R/lav_partable_constraints.R +cacb1dc8b9b8f0811af79883befca31f *R/lav_partable_complete.R +69f229fe4207f87ec584b7b7f824aef0 *R/lav_partable_constraints.R 
+2b8c7cec7785acdf0214fcddf704e458 *R/lav_partable_flat.R 5df28f620e9b4079812a097ba195a346 *R/lav_partable_from_lm.R -69a789bfeab27d1a8aa7ecd4969f2bb3 *R/lav_partable_independence.R -c1855cd70b608a4a57893639d573034f *R/lav_partable_merge.R -ace679c69787dad2427dae36ec255ea4 *R/lav_partable_unrestricted.R -a4bfb2725207669b62aabd3401bff411 *R/lav_partable_vnames.R +4c1181cf31b9326918ee9362ca11ce9d *R/lav_partable_full.R +eb9ba742eda2cab0d379ff1c18219d38 *R/lav_partable_independence.R +628e181964bd94246c8aa00e70b95110 *R/lav_partable_labels.R +09d977c10cc73bb9bbc4fe9040545dde *R/lav_partable_merge.R +c6db81581a81ef93df9bb0b0d5c6adb6 *R/lav_partable_subset.R +170bfea2ae33549738620ef54fe3862c *R/lav_partable_unrestricted.R +d27638ad54696d03ac432afc410454b7 *R/lav_partable_utils.R +1d59a968e40e904a9e3de8642ed84415 *R/lav_partable_vnames.R 0e9330efc762a1624539e3de8f107826 *R/lav_pearson.R -798e32ce91865dd6399f3207a87ec79d *R/lav_polychor.R +89f78a64372a29e8137f558e79a1fd63 *R/lav_polychor.R 1082355ff850feeb16b3f91b6f63e4c1 *R/lav_polyserial.R -5d0b573902945fe462c899a249052ab3 *R/lav_predict.R +40059db7654ebcf91e7ed780b4274d8b *R/lav_predict.R 5772816536e8e78943c8f9356f9a6b19 *R/lav_prelis.R -718d87cb293d3568f1c873d36ec8e490 *R/lav_print.R +b8c4aeb8fc7c892a2be954c849e393b3 *R/lav_print.R 9f8025fb464c03dacfa18692d76cdba7 *R/lav_probit.R -66163fca8f4009fd3cdb7733b98c2ca0 *R/lav_representation_lisrel.R -9319e55c15fd8b8d2b02412e17a97f1d *R/lav_residuals.R -a891ceb9fac7c6888ad95dcd4866164e *R/lav_samplestats.R -4fbcca1c8a1ea214b0b89d28f0da7b71 *R/lav_samplestats_gamma.R +5b7d428b3d7be51a57191b0680d6cff0 *R/lav_representation.R +c9ccab2abb5c999b7335fcb4b9765c32 *R/lav_representation_lisrel.R +ef1f7fb540578a364b5d02ba8750eda2 *R/lav_residuals.R +185c104eec587587f65a94fb8fb29bf0 *R/lav_samplestats.R +13b7ff1768cccafa70f47edecc9b5756 *R/lav_samplestats_gamma.R bb369a476bca2e7c8183f579d30b8777 *R/lav_samplestats_icov.R 257a1eb16e8c380cfc617889561bd7ce *R/lav_samplestats_igamma.R cee93731b368f057118e8b8d8e3f9d9f *R/lav_samplestats_step1.R -6cef92789ed16e6933302e736378981f *R/lav_samplestats_step2.R +a9d3cf9cec393eeecf892ff581307612 *R/lav_samplestats_step2.R 2a34b9538206daf1d2706aa642f4af67 *R/lav_samplestats_wls_obs.R -b4febf45fc2290f8de0501b277fd760e *R/lav_simulate.R -bcf64b2c42519836245bd938ac70398c *R/lav_standardize.R -da7063a80b7fcdb1a3ce7e0cd421e450 *R/lav_start.R +24574624218d7d39683b0d441b124e18 *R/lav_simulate.R +dcc301411cd8bd4f1ab5017f03a42e8c *R/lav_standardize.R +980d2ee512e4161479c24262e2407649 *R/lav_start.R 5a98227abbbcc60879ee20cff7d42c71 *R/lav_syntax.R e53cfe7dbe16fb84f466f9092ac106c4 *R/lav_syntax_independence.R -3088779dd29eded8d85b877bf15d5419 *R/lav_tables.R +8862508aaa88781c225712d60a4f83cb *R/lav_tables.R 2ef06fa1d328ec66c323c95fd1badeae *R/lav_tables_mvb.R -a46642adbb78d1250b565554b37db03c *R/lav_test.R +1f8a1698fb32a035ecb925122d1f3938 *R/lav_test.R f1ed09c297b98070e4143c1e0d9fb0bb *R/lav_test_LRT.R 4fab9b5a0a3c04bdb02c2149496854da *R/lav_test_Wald.R -1e70d29660512dbd10cf2e3c6eea7faa *R/lav_test_diff.R -b70cb2daf06d0f7956a74f5315b26920 *R/lav_test_satorra_bentler.R -6928eefa29f78e0920495449df216bd6 *R/lav_test_score.R -f4f58f5d6fb8378edffae3f01117170b *R/lav_utils.R +b84a4f37d8bd35e5fdb307822af124c5 *R/lav_test_diff.R +63260bbd2c47479ee96751ce547e1da7 *R/lav_test_satorra_bentler.R +4534f85e5ae8a2c000372b940175f735 *R/lav_test_score.R +4747e171a70889c14521a768151b8311 *R/lav_utils.R e55602f251d703e85bd891db1ab23bb2 *R/lavaan-deprecated.R -3bbea302b122681aef137b50c3c205e9 
*R/xxx_fsr.R -a7d41f484f3cd401acd587e51ff3ea38 *R/xxx_lavaan.R -b691177ab8b9a3f8dd334adcc20b377f *R/xxx_lavaanList.R -1e860a685393ff830e91e9aafda0bdae *R/xxx_prelav.R +9f98e4dd8b7f1d46765d3343feaf4169 *R/xxx_fsr.R +6064003b1b158e6dc73d12e3dffc72aa *R/xxx_lavaan.R +80c919eccb8bdf6b832a929c8cac94b8 *R/xxx_lavaanList.R +ab268a2eeb7c39f66ee65c3cf2dd3edc *R/xxx_prelav.R 63eac6a2eb3399ebccd1558b3124353d *R/zzz.R 3b9220c0c6ba9e0d5a7afac5ed96d94c *R/zzz_OLDNAMES.R 9e007dd29a2cc3ba2bbf789337706826 *README @@ -127,18 +136,20 @@ cf2455a356ece4902222b7a4597c5b9e *man/InformativeTesting.Rd 798b9cb26b058c6656e3333315cfa184 *man/PoliticalDemocracy.Rd 9738b5515aac6b61f5c3c000b01a9b21 *man/bootstrap.Rd -19d3b308f177532b6fd3e0f1d8546770 *man/cfa.Rd +0345234b227b87194ae179f7f6a049fe *man/cfa.Rd 86e7e1309c7ec3a0199e2ec03007a34b *man/estfun.Rd 1952397a65d3bb19089f213561c1a998 *man/fitMeasures.Rd -0519025bc98252bed281194bc95dec13 *man/fsr.Rd +1f07df92df54b4f7be7de71e1f370da4 *man/fsr.Rd e10ce2f596f1e9105b12d1e9860aacfd *man/getCov.Rd -8653f26dc7e613fec82585d99c37161a *man/growth.Rd -5dfbf2a6677c5fad500846aa87513c7b *man/inspectSampleCov.Rd +9cebe1d44273925ea3e6b3ab02cf2d72 *man/growth.Rd +73e825fa761127c81205c806be420ffa *man/inspectSampleCov.Rd b33382dcd6e15dc2d62e130c0006959b *man/lavCor.Rd 225af6a64d1104380844614ca4191caa *man/lavExport.Rd -03724b90d531ee6a526e16f048b7a702 *man/lavInspect.Rd +82d47428d8b8f014665079c5d7ad3303 *man/lavInspect.Rd +ef260cb8a8feaffdba84528cdc1b3f4b *man/lavListInspect.Rd dd828fdc7f100a867061aa464b3a55b0 *man/lavMatrixRepresentation.Rd -12f7e578a81c0ce8c5f1ae2a3ff0d346 *man/lavNames.Rd +fff3a2754f2e4996ade52d8eb794ab44 *man/lavNames.Rd +69d6a8669b54b60dabdc42d61df1a16d *man/lavOptions.Rd 299c95c112718f7d852ba8c33b3f5f68 *man/lavParTable.Rd 557cbd44a5edac922e6284934b269478 *man/lavPredict.Rd d2e8961bb9e6e83fc8bc38c476f8ccc1 *man/lavTables.Rd @@ -146,22 +157,28 @@ 7fa0dff6a902cb32a81badad0d99c4db *man/lavTestLRT.Rd 387144a2126e7bfbecdef35b8282c1a7 *man/lavTestScore.Rd b2e463238f7fe7bfd43176d5443bbd2d *man/lavTestWald.Rd -7337a9b16d105092b1c49aa5d34e71ff *man/lav_constraints.Rd +81f283668573ca1a58784c8478f50be4 *man/lav_constraints.Rd f057078c4e6dbf973f4461ca1a560dc2 *man/lav_func.Rd 7b73fb60ab06c6e5ada8216f47d9c3ee *man/lav_matrix.Rd -eed739f08318789212f0c217b918512f *man/lav_model.Rd -2a4f1cddacbbf5ee3ddfe7a4924ae42b *man/lav_partable.Rd -66240e42e14d28e7f50ac3d106dcf38f *man/lavaan-class.Rd +af2579cb02a596b3a204f2f5abb6b8d9 *man/lav_model.Rd +ba05e4d7d9017d8dbb9111b1c1b81db9 *man/lav_partable.Rd +b3a44227f7af430828548addfd6f6b51 *man/lavaan-class.Rd 12c96de013f21abae8a64ae3b83e4845 *man/lavaan-deprecated.Rd -6f6ba01e403c7b2054c01ce4f9b3a171 *man/lavaan.Rd +2a9ce97b038dacf567ef297f72f6cfd2 *man/lavaan.Rd 76ace308ab03a4f3000c7d6a92028418 *man/lavaanList-class.Rd -e744969b03fb6b5e49f0847a1570e8ba *man/lavaanList.Rd +c8e07abdc279f4d2c142ccbac61717fe *man/lavaanList.Rd 86a16eee44d8d394b0d9cf59a438365f *man/model.syntax.Rd 3b1606f152eb91e705cfa4ed28271b80 *man/modificationIndices.Rd -f2869783c1166198d699406cb4eca794 *man/mplus2lavaan.Rd +daa58c2293dc757b81235068df0a7a51 *man/mplus2lavaan.Rd +9e2d7388f9de4c8314af3a679b5c4471 *man/mplus2lavaan.modelSyntax.Rd 54df69ab414febb1b03b4232ed7b533b *man/parameterEstimates.Rd 0c7105b43a2ca7c3b1aa0793507580ca *man/plot.InformativeTesting.Rd -437864b02f49105e934abe162dc12dcb *man/sem.Rd -cdb7e37cafb605770c629b18f4c0e5b8 *man/simulateData.Rd -ec23b1842f5f32c191516150fe4426e6 *man/standardizedSolution.Rd +6029d7b0733788cafaf13f427bc35e19 *man/sem.Rd 
+4ad763d50981425d29ba3af2fab22dc2 *man/simulateData.Rd +392ce24890e602302ceddd4a88624386 *man/standardizedSolution.Rd 31fb942eea99dbd852bd8ea3f2ed0a7b *man/varTable.Rd +57845942affbefe30b1e0fce405a406b *tests/testthat.R +3a80335c74d1ce33a58a807939e1d1c4 *tests/testthat/helper-skip_level.R +cbc5444212e37b23305b07a1681a0328 *tests/testthat/test-lav_matrix.R +a12db7b5aafb3df1955d471eb1314a7f *tests/testthat/test-lav_mvnorm.R +af2a8848a7d40dbed3ec9fa7ea99c517 *tests/testthat/test-skip_example.R diff -Nru r-cran-lavaan-0.5.22/NAMESPACE r-cran-lavaan-0.5.23.1097/NAMESPACE --- r-cran-lavaan-0.5.22/NAMESPACE 2016-07-20 09:00:39.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/NAMESPACE 2017-02-17 07:29:04.000000000 +0000 @@ -53,8 +53,11 @@ "lavParseModelString", # "parseModelString", "lavInspect", "inspect", "lavTech", + "lavListInspect", "lavListTech", # utilities "getCov", "char2num", "cor2cov", + # options, + "lavOptions", "modindices", "modificationIndices", "modificationindices", "standardizedSolution", "standardizedsolution", @@ -72,6 +75,7 @@ "lavTestLRT", "lavTestWald", "lavTestScore", "lavMatrixRepresentation", "mplus2lavaan", + "mplus2lavaan.modelSyntax", #"prelav", #"lavData", "lavPredict", @@ -128,6 +132,9 @@ "lav_partable_complete", "lav_partable_attributes", "lav_partable_merge", + "lav_partable_constraints_def", + "lav_partable_constraints_ceq", + "lav_partable_constraints_ciq", # lav_constraints "lav_constraints_parse", @@ -166,7 +173,6 @@ "vcov", "logLik", "fitted.values", "fitted", - "inspect", "summary" ) @@ -177,9 +183,17 @@ S3method(print, lavaan.vector) S3method(print, lavaan.parameterEstimates) S3method(print, prelav) +S3method(print, lavaan.fsr) +S3method(summary, lavaan.fsr) S3method(pairs, lavaan) S3method(print, InformativeTesting) S3method(plot, InformativeTesting) # S3method(print, lavaan.tables.fit.Cf) # S3method(print, lavaan.tables.fit.Cp) # S3method(print, lavaan.tables.fit.Cm) +S3method(inspect, lavaan) +S3method(inspect, lavaanList) +S3method(lavInspect, lavaan) +S3method(lavTech, lavaan) +S3method(lavInspect, lavaanList) +S3method(lavTech, lavaanList) diff -Nru r-cran-lavaan-0.5.22/R/00class.R r-cran-lavaan-0.5.23.1097/R/00class.R --- r-cran-lavaan-0.5.22/R/00class.R 2016-07-23 13:10:15.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/00class.R 2017-02-24 10:29:26.000000000 +0000 @@ -9,7 +9,10 @@ data.type="character", # "full", "moment" or "none" ngroups="integer", # number of groups group="character", # group variable + nlevels="integer", # number of levels + cluster="character", # cluster variable(s) group.label="character", # group labels + level.label="character", # level labels std.ov="logical", # standardize observed variables? nobs="list", # effective number of observations norig="list", # original number of observations @@ -17,12 +20,14 @@ ov.names.x="list", # exo variable names (per group) #ov.types="list", # variable types (per group) #ov.idx="list", # column indices (all observed variables) + ordered="character", # ordered variables ov="list", # variable table case.idx="list", # case indices per group missing="character", # "listwise" or not? Mp="list", # if not complete, missing patterns # we need this here, to get nobs right! 
Rp="list", # response patterns (categorical only) + Lp="list", # level patterns eXo="list", # local copy exo only X="list" # local copy ) @@ -31,7 +36,6 @@ setClass("lavSampleStats", # sample moments representation( - CAT="list", var="list", # observed variances (per group) cov="list", # observed var/cov matrix (per group) mean="list", # observed mean vector (per group) @@ -69,11 +73,15 @@ missing.flag="logical", # missing patterns? missing="list", # missingness information - missing.h1="list" # h1 model + missing.h1="list", # h1 model + + YLp = "list", # cluster/level information + + zero.cell.tables="list" # bivariate tables with empty cells ) ) -setClass("Model", # MATRIX representation of the sem model +setClass("lavModel", # MATRIX representation of the sem model representation( GLIST="list", # list of all model matrices (for all groups) dimNames="list", # dim names for the model matrices @@ -85,9 +93,9 @@ categorical="logical", group.w.free="logical", link="character", - control="list", - ngroups="integer", + nblocks="integer", + ngroups="integer", # only for rsem!! (which uses rsem:::computeDelta) nmat="integer", nvar="integer", num.idx="list", @@ -135,8 +143,9 @@ ov.x.dummy.ov.idx="list", ov.x.dummy.lv.idx="list", ov.y.dummy.ov.idx="list", - ov.y.dummy.lv.idx="list" + ov.y.dummy.lv.idx="list", + estimator="character" ) ) @@ -173,7 +182,7 @@ pta = "list", # parameter table attributes Data = "lavData", # full data SampleStats = "lavSampleStats", # sample statistics - Model = "Model", # internal matrix representation + Model = "lavModel", # internal matrix representation Cache = "list", # housekeeping stuff Fit = "Fit", # fitted results boot = "list", # bootstrap results @@ -192,7 +201,7 @@ ParTable = "list", pta = "list", Data = "lavData", # from first dataset (ngroups!) - Model = "Model", # based on first dataset + Model = "lavModel", # based on first dataset meta = "list", timingList = "list", diff -Nru r-cran-lavaan-0.5.22/R/00generic.R r-cran-lavaan-0.5.23.1097/R/00generic.R --- r-cran-lavaan-0.5.22/R/00generic.R 2015-09-26 16:13:36.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/00generic.R 2017-02-21 16:33:02.000000000 +0000 @@ -1,6 +1,3 @@ -# for simsem: uses 'inspect' in exportMethods in NAMESPACE -setGeneric("inspect", function(object, ...) standardGeneric("inspect")) - # for blavaan setGeneric("fitMeasures", function(object, fit.measures = "all", baseline.model = NULL) @@ -9,3 +6,24 @@ function(object, fit.measures = "all", baseline.model = NULL) standardGeneric("fitmeasures")) + +# S3 generics +inspect <- function(object, what = "free", ...) 
{ + UseMethod("inspect", object) +} + +lavInspect <- function(object, what = "free", + add.labels = TRUE, + add.class = TRUE, + list.by.group = TRUE, + drop.list.single.group = TRUE) { + UseMethod("lavInspect", object) +} + +lavTech <- function(object, what = "free", + add.labels = FALSE, + add.class = FALSE, + list.by.group = FALSE, + drop.list.single.group = FALSE) { + UseMethod("lavTech", object) +} diff -Nru r-cran-lavaan-0.5.22/R/01RefClass_00lavModel.R r-cran-lavaan-0.5.23.1097/R/01RefClass_00lavModel.R --- r-cran-lavaan-0.5.22/R/01RefClass_00lavModel.R 2015-02-06 08:31:08.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/01RefClass_00lavModel.R 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -# generic statistical model -- YR 10 july 2012 - - -# super class -- virtual statistical model -lavRefModel <- setRefClass("lavModel", - -# fields -fields = list( - npar = "integer", # number of free model parameters - theta = "numeric", # the model parameters (free only) - theta.labels = "character" # parameter names (if any) -), - -# methods -methods = list( - -show = function(header=TRUE) { - if(header) - cat(class(.self), "model parameters (theta):\n") - out <- theta # avoid changing theta by giving names - if(length(theta.labels) > 0L) - names(out) <- theta.labels - print(out) -} - -)) - diff -Nru r-cran-lavaan-0.5.22/R/01RefClass_00lavRefModel.R r-cran-lavaan-0.5.23.1097/R/01RefClass_00lavRefModel.R --- r-cran-lavaan-0.5.22/R/01RefClass_00lavRefModel.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/01RefClass_00lavRefModel.R 2017-01-25 17:58:52.000000000 +0000 @@ -0,0 +1,27 @@ +# generic statistical model -- YR 10 july 2012 + + +# super class -- virtual statistical model +lavRefModel <- setRefClass("lavRefModel", + +# fields +fields = list( + npar = "integer", # number of free model parameters + theta = "numeric", # the model parameters (free only) + theta.labels = "character" # parameter names (if any) +), + +# methods +methods = list( + +show = function(header=TRUE) { + if(header) + cat(class(.self), "model parameters (theta):\n") + out <- theta # avoid changing theta by giving names + if(length(theta.labels) > 0L) + names(out) <- theta.labels + print(out) +} + +)) + diff -Nru r-cran-lavaan-0.5.22/R/01RefClass_01lavOptim.R r-cran-lavaan-0.5.23.1097/R/01RefClass_01lavOptim.R --- r-cran-lavaan-0.5.22/R/01RefClass_01lavOptim.R 2015-02-06 08:31:08.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/01RefClass_01lavOptim.R 2017-01-25 17:59:25.000000000 +0000 @@ -6,7 +6,7 @@ lavRefOptim <- setRefClass("lavOptim", # inherits -contains = "lavModel", +contains = "lavRefModel", # fields fields = list( diff -Nru r-cran-lavaan-0.5.22/R/ctr_mplus2lavaan.R r-cran-lavaan-0.5.23.1097/R/ctr_mplus2lavaan.R --- r-cran-lavaan-0.5.22/R/ctr_mplus2lavaan.R 2015-02-06 08:31:08.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/ctr_mplus2lavaan.R 2017-01-08 14:26:58.000000000 +0000 @@ -1,6 +1,4 @@ # this code is written by Michael Hallquist - - #First draft of parser to convert Mplus model syntax to lavaan model syntax #idea: build parTable and run model from mplus syntax @@ -485,7 +483,7 @@ #split into vector of strings #syntax.split <- unlist( strsplit(syntax, "\n") ) - syntax.split <- unlist( strsplit(syntax, ";") ) + syntax.split <- trimSpace(unlist( strsplit(syntax, ";") )) #format of parTable to mimic. # 'data.frame': 34 obs. 
of 12 variables: @@ -615,13 +613,21 @@ } -mplus2lavaan <- function(inpfile) { - #require(lavaan) - - if (!file.exists(inpfile)) stop("Could not find file: ", inpfile) - - inpfile.text <- scan(inpfile, what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE) +mplus2lavaan <- function(inpfile, run=TRUE) { + stopifnot(length(inpfile) == 1L) + stopifnot(grepl("\\.inp$", inpfile)) + if (!file.exists(inpfile)) { stop("Could not find file: ", inpfile) } + + #for future consideration. For now, require a .inp file +# if (length(inpfile) == 1L && grepl("\\.inp$", inpfile)) { +# if (!file.exists(inpfile)) { stop("Could not find file: ", inpfile) } +# inpfile.text <- scan(inpfile, what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE) +# } else { +# #assume that inpfile itself is syntax (e.g., in a character vector) +# inpfile.text <- inpfile +# } + inpfile.text <- scan(inpfile, what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE) sections <- divideInputIntoSections(inpfile.text, inpfile) mplus.inp <- list() @@ -693,27 +699,55 @@ bootstrap <- as.numeric(substr(mplus.inp$analysis$bootstrap, attr(nboot.match, "capture.start"), attr(nboot.match, "capture.start") + attr(nboot.match, "capture.length") - 1L)) } } - fit <- sem(mplus.inp$model, data=mplus.inp$data, meanstructure=meanstructure, mimic="Mplus", estimator=estimator, test=test, se=se, bootstrap=bootstrap, information=information) - return(list(lav.out=fit, mplus.inp=mplus.inp)) + if (run) { + fit <- sem(mplus.inp$model, data=mplus.inp$data, meanstructure=meanstructure, mimic="Mplus", estimator=estimator, test=test, se=se, bootstrap=bootstrap, information=information) + fit@external <- list(mplus.inp=mplus.inp) + } else { + fit <- mplus.inp #just return the syntax outside of a lavaan object + } + + return(fit) } + divideIntoFields <- function(section.text, required) { + if (is.null(section.text)) { return(NULL) } - section.split <- strsplit(paste(section.text, collapse=" "), ";", fixed=TRUE)[[1]] + #The parser breaks down when there is a line with a trailing comment because then splitting on semicolon will combine it with the following line + #Thus, trim off trailing comments before initial split + section.text <- gsub("\\s*!.*$", "", section.text, perl=TRUE) + section.split <- strsplit(paste(section.text, collapse=" "), ";", fixed=TRUE)[[1]] #split on semicolons section.divide <- list() for (cmd in section.split) { if (grepl("^\\s*!.*", cmd, perl=TRUE)) next #skip comment lines if (grepl("^\\s+$", cmd, perl=TRUE)) next #skip blank lines - #force text matches at word boundary, or just split on = (\b doesn't work for =) - cmd.split <- strsplit(cmd[1L], "(\\b(IS|ARE|is|are|Is|Are)\\b|=)", perl=TRUE)[[1]] - if (!length(cmd.split) == 2L) stop("First line not dividing into LHS and RHS: ", cmd[1L]) - - cmdName <- trimSpace(cmd.split[1L]) - cmdArgs <- trimSpace(cmd.split[2L]) + #mplus is apparently tolerant of specifications that don't include IS/ARE/= + #example: usevariables x1-x10; + #thus, split on spaces and assume that first element is lhs, drop second element if IS/ARE/=, and assume remainder is rhs + + #but if user uses equals sign, then spaces will not always be present (e.g., usevariables=x1-x10) + if ( (leadingEquals <- regexpr("^\\s*[A-Za-z]+[A-Za-z_-]*\\s*(=)", cmd[1L], perl=TRUE))[1L] > 0) { + cmdName <- trimSpace(substr(cmd[1L], 1, attr(leadingEquals, "capture.start") - 1)) + cmdArgs <- trimSpace(substr(cmd[1L], attr(leadingEquals, "capture.start") + 1, nchar(cmd[1L]))) 
+ } else { + cmd.spacesplit <- strsplit(trimSpace(cmd[1L]), "\\s+", perl=TRUE)[[1L]] + + if (length(cmd.spacesplit) < 2L) { + #for future: make room for this function to parse things like just TECH13 (no rhs) + } else { + cmdName <- trimSpace(cmd.spacesplit[1L]) + if (length(cmd.spacesplit) > 2L && tolower(cmd.spacesplit[2L]) %in% c("is", "are")) { + cmdArgs <- paste(cmd.spacesplit[3L:length(cmd.spacesplit)], collapse=" ") #remainder, removing is/are + } else { + cmdArgs <- paste(cmd.spacesplit[2L:length(cmd.spacesplit)], collapse=" ") #is/are not used, so just join rhs + } + } + + } section.divide[[make.names(tolower(cmdName))]] <- cmdArgs @@ -772,7 +806,10 @@ else datFile <- file.path(inpfile.split$directory, mplus.inp$data$file) #dat file path is relative or absent, and inp file directory is present - if (!file.exists(datFile)) stop("Cannot find data file: ", datFile) + if (!file.exists(datFile)) { + warning("Cannot find data file: ", datFile) + return(NULL) + } #handle missing is/are: missList <- NULL @@ -798,7 +835,7 @@ if (missSpec == "." || missSpec=="*") { #case 1: MISSING ARE|=|IS .; na.strings <- missSpec - } else if ((allMatch <- regexpr("\\s+ALL\\s+\\(([^\\)]+)\\)", missSpec, perl=TRUE))[1L] > -1L) { #case 2: use of ALL with parens + } else if ((allMatch <- regexpr("\\s*ALL\\s*\\(([^\\)]+)\\)", missSpec, perl=TRUE))[1L] > -1L) { #case 2: use of ALL with parens missStr <- trimSpace(substr(missSpec, attr(allMatch, "capture.start"), attr(allMatch, "capture.start") + attr(allMatch, "capture.length") - 1L)) na.strings <- expandMissVec(missStr) } else { #case 3: specific missing values per variable @@ -832,7 +869,7 @@ } else { stop("I don't understand this missing specification: ", missSpec) } } } else { na.strings <- "NA" } - + if (!is.null(missList)) { dat <- read.table(datFile, header=FALSE, col.names=mplus.inp$variable$names, colClasses="numeric") #loop over variables in missList and set missing values to NA @@ -848,7 +885,6 @@ } - #TODO: support covariance/mean+cov inputs #store categorical variables as ordered factors diff -Nru r-cran-lavaan-0.5.22/R/ctr_pml_doubly_robust_utils.R r-cran-lavaan-0.5.23.1097/R/ctr_pml_doubly_robust_utils.R --- r-cran-lavaan-0.5.22/R/ctr_pml_doubly_robust_utils.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/ctr_pml_doubly_robust_utils.R 2016-11-18 08:34:48.000000000 +0000 @@ -0,0 +1,397 @@ +# This code was contributed by Myrsini Katsikatsou (LSE) -- September 2016 +# +# compute_uniCondProb_based_on_bivProb() +# pairwiseExpProbVec_GivenObs() +# LongVecTH.Rho.Generalised() +# pairwiseExpProbVec_GivenObs_UncMod() + +compute_uniCondProb_based_on_bivProb <- function(bivProb, nvar, + idx.pairs, + idx.Y1, + idx.Gy2, + idx.cat.y1.split, + idx.cat.y2.split) { + bivProb.split <- split(bivProb, idx.pairs) + lngth <- 2*length(bivProb) + idx.vec.el <- 1:lngth + ProbY1Gy2 <- rep(NA, lngth) + no.pairs <- nvar*(nvar-1)/2 + idx2.pairs <- combn(nvar,2) + + for(k in 1:no.pairs){ + y2Sums <- tapply(bivProb.split[[k]], idx.cat.y2.split[[k]], sum) + y2Sums.mult <- y2Sums[idx.cat.y2.split[[k]] ] + Y1Gy2 <- bivProb.split[[k]]/ y2Sums.mult + tmp.idx.vec.el <- idx.vec.el[(idx.Y1 == idx2.pairs[1,k]) & + (idx.Gy2 == idx2.pairs[2,k])] + ProbY1Gy2[tmp.idx.vec.el] <- Y1Gy2 + } + + for(k in 1:no.pairs){ + y1Sums <- tapply(bivProb.split[[k]], idx.cat.y1.split[[k]], sum) + y1Sums.mult <- y1Sums[idx.cat.y1.split[[k]] ] + Y2Gy1 <- bivProb.split[[k]]/ y1Sums.mult + reordered_Y2Gy1 <- Y2Gy1[order(idx.cat.y1.split[[k]])] + tmp.idx.vec.el <- idx.vec.el[(idx.Y1 == 
idx2.pairs[2,k]) & + (idx.Gy2 == idx2.pairs[1,k])] + ProbY1Gy2[tmp.idx.vec.el] <- reordered_Y2Gy1 + } + + ProbY1Gy2 +} + +# The input of the function is a lavobject, which, in turn, is the output of the +# sem function having specified estimator="PML", missing="available.cases" + +#The output of the function is a list of two lists: the pairwiseProbGivObs list and +# the univariateProbGivObs list. Each of the two lists consists of G matrices where G +# is the number of groups in a multigroup analysis. If G=1 each of the lists +# contains only one matrix that can be called as pairwiseProbGivObs[[1]], and +# univariateProbGivObs[[1]]. + +# Each of the matrices in the pairwiseProbGivObs list is of dimension: nrow=sample size, +#ncol=sum of the number of response categories for all pairs of variables +#(i.e. the length of the vector pxixj.ab where i<j). + +pairwiseExpProbVec_GivenObs <- function(lavobject) { + #yhat: if lavobject@Data@ngroups>1, it is a list of G matrices + #where G is the number of groups and the matrices are of dimension + #nrow=sample size and ncol=number of items. + #If lavobject@Data@ngroups=1 then yhat is a matrix. + yhat <- lavPredict(object=lavobject, type = "yhat" ) + + #compute bivariate probabilities + ngroups <- lavobject@Data@ngroups + univariateProb <- vector("list", length=ngroups) + pairwiseProb <- vector("list", length=ngroups) + #save the indices of the Theta matrices for the groups stored in GLIST + idx.ThetaMat <- which(names(lavobject@Model@GLIST)=="theta") + + for(g in seq_len(ngroups)) { # g<-1 + + if(ngroups>1L){ + yhat_group <- yhat[[g]] + } else { + yhat_group <- yhat + } + + nsize <- lavobject@Data@nobs[[g]] + nvar <- lavobject@Model@nvar[[g]] + Data <- lavobject@Data@X[[g]] + TH <- lavobject@Fit@TH[[g]] + th.idx <- lavobject@Model@th.idx[[g]] + Theta <- lavobject@Model@GLIST[ idx.ThetaMat[g] ]$theta + error.stddev <- diag(Theta)^0.5 + + #for the computation of the univariate probabilities + nlev <- lavobject@Data@ov$nlev + idx.uniy <- rep(1:nvar, times=nlev) + + #indices vectors for the computation of bivariate probabilities + idx.pairs.yiyj <- combn(1:nvar,2) + no_biv_resp_cat_yiyj <- sapply(1:ncol(idx.pairs.yiyj), function(x){ + prod( nlev[idx.pairs.yiyj[,x]] ) }) + idx.y1 <- unlist( + mapply(rep, idx.pairs.yiyj[1,], each= no_biv_resp_cat_yiyj) ) + idx.y2 <- unlist( + mapply(rep, idx.pairs.yiyj[2,], each= no_biv_resp_cat_yiyj) ) + + + univariateProb[[g]] <- matrix(0, nrow = nsize, ncol = sum(nlev) ) + pairwiseProb[[g]] <- matrix(0, nrow = nsize, + ncol = length(lavobject@Cache[[g]]$bifreq)) + + idx.MissVar.casewise <- apply(Data, 1, function(x) { which(is.na(x)) } ) + + for(i in 1:nsize){ + idx.MissVar <- idx.MissVar.casewise[[i]] + noMissVar <- length(idx.MissVar) + + if( noMissVar>0L ) { + #compute the univariate probabilities + TH.list <- split(TH,th.idx) + tmp.TH <- TH.list[idx.MissVar] + tmp.lowerTH <- unlist(lapply(tmp.TH, function(x){c(-Inf,x)})) + tmp.upperTH <- unlist(lapply(tmp.TH, function(x){c(x,Inf) })) + + idx.items <- rep(c(1:noMissVar), times=nlev[idx.MissVar]) + tmp.mean <- yhat_group[i,idx.MissVar] + tmp.mean.extended <- tmp.mean[idx.items] + tmp.stddev <- error.stddev[idx.MissVar] + tmp.stddev.extended <- tmp.stddev[idx.items] + + tmp.uniProb <- pnorm( (tmp.upperTH - tmp.mean.extended )/ + tmp.stddev.extended ) - + pnorm( (tmp.lowerTH - tmp.mean.extended )/ + tmp.stddev.extended ) + idx.columnsUni <- which(idx.uniy %in% idx.MissVar) + univariateProb[[g]][i, idx.columnsUni] <- tmp.uniProb + + #compute the bivariate probabilities + if( noMissVar>1L ) { + idx.pairsMiss <- combn(idx.MissVar ,2) + no.pairs <- ncol(idx.pairsMiss) + 
idx.pairsV2 <- combn(noMissVar, 2) + idx.columns <- unlist(lapply(1:no.pairs, function(x){ + which( (idx.y1 == idx.pairsMiss[1,x]) & + (idx.y2 == idx.pairsMiss[2,x]) ) } ) ) + + if( all( Theta[t(idx.pairsMiss)]==0 ) ){ #items independence given eta + tmp.uniProb.list <- split(tmp.uniProb, idx.items) + pairwiseProb[[g]][i, idx.columns] <- + unlist( lapply(1:no.pairs, function(x){ + c( outer(tmp.uniProb.list[[ idx.pairsV2[1,x] ]] , + tmp.uniProb.list[[ idx.pairsV2[2,x] ]] ) ) }) ) + } else { #when correlation between measurement errors + + tmp.th.idx <- th.idx[th.idx %in% idx.MissVar] + #recode so that it is always 1,1,..,1, 2,...,2, etc. + tmp.th.idx.recoded <- rep(c(1:noMissVar), times=table(tmp.th.idx)) + tmp.TH <- TH[th.idx %in% idx.MissVar] + + tmp.ind.vec <- LongVecInd(no.x = noMissVar, + all.thres = tmp.TH, + index.var.of.thres = tmp.th.idx.recoded) + + tmp.th.rho.vec <- LongVecTH.Rho.Generalised( + no.x = noMissVar, + TH = tmp.TH, + th.idx = tmp.th.idx.recoded, + cov.xixj = Theta[t(idx.pairsMiss)] , + mean.x = yhat_group[i,idx.MissVar], + stddev.x = error.stddev[idx.MissVar] ) + + tmp.bivProb <- pairwiseExpProbVec(ind.vec = tmp.ind.vec , + th.rho.vec = tmp.th.rho.vec) + + pairwiseProb[[g]][i, idx.columns] <- tmp.bivProb + } #end of else of if( all( Theta[t(idx.pairsMiss)]==0 ) ) + # which checks item local independence + } #end of if( noMissVar>1L ) + + #cat(i, "\n") + } #end of if(noMissVar>0L) + } #end of for(i in 1:nsize) + + } #end of for(g in seq_len(lavobject@Data@ngroups)) + + list(univariateProbGivObs = univariateProb, + pairwiseProbGivObs = pairwiseProb) +} # end of the function pairwiseExpProbVec_GivenObs + +################################################################## + + + +# LongVecTH.Rho.Generalised function is defined as follows +LongVecTH.Rho.Generalised <- function(no.x, TH, th.idx, + cov.xixj, mean.x, stddev.x ) { + all.std.thres <- (TH - mean.x[th.idx]) / stddev.x[th.idx] + id.pairs <- utils::combn(no.x,2) + cor.xixj <- cov.xixj /( stddev.x[id.pairs[1,]] * stddev.x[id.pairs[2,]]) + + LongVecTH.Rho(no.x = no.x, + all.thres = all.std.thres, + index.var.of.thres = th.idx, + rho.xixj = cor.xixj) +} + +# LongVecTH.Rho.Generalised is a generalisation of the function +# lavaan:::LongVecTH.Rho . The latter assumes that all y* follow standard +# normal so the thresholds are automatically the standardised ones. +# LongVecTH.Rho.Generalised does not assume that, each of y*'s can follow +# a normal distribution with mean mu and standard deviation sigma. +# LongVecTH.Rho.Generalised has the following input arguments: +# no.x (same as in lavaan:::LongVecTH.Rho), +# TH (similar to the TH in lavaan:::LongVecTH.Rho but here they are the unstandardised thresholds, i.e. 
of the normal distribution with mean mu and standard deviation sigma) +# th.idx (same as index.var.of.thres in lavaan:::LongVecTH.Rho) +# cov.xixj which are the polychoric covariances of the pairs of underlying variables provided in a similar fashion as rho.xixj in lavaan:::LongVecTH.Rho) +# mean.x is a vector including the means of y*'s provided in the order mean.x1, mean.x2, ...., mean.xp +# stddev.x is a vector including the standard deviations of y*'s provided in the order stddev.x1, stddev.x2, ...., stddev.xp + +# The output of the new function is similar to that of lavaan:::LongVecTH.Rho +############################################# + + +#lavobject is the output of the lavaan function where either the unconstrained +#or a hypothesized model has been fitted +pairwiseExpProbVec_GivenObs_UncMod <- function(lavobject) { + ngroups <- lavobject@Data@ngroups + TH <- lavobject@implied$th #these are the standardized thresholds + #mean and variance of y* have been taken into account + TH.IDX <- lavobject@SampleStats@th.idx + Sigma.hat <- lavobject@implied$cov + + univariateProb <- vector("list", length=ngroups) + pairwiseProb <- vector("list", length=ngroups) + + for(g in 1:ngroups) { + Sigma.hat.g <- Sigma.hat[[g]] + # is Sigma.hat always a correlation matrix? + Cor.hat.g <- cov2cor(Sigma.hat.g) + cors <- Cor.hat.g[lower.tri(Cor.hat.g)] + if(any(abs(cors) > 1)) { + warning("lavaan WARNING: some model-implied correlations + are larger than 1.0") + } + nvar <- nrow(Sigma.hat.g) + MEAN <- rep(0, nvar) + TH.g <- TH[[g]] + th.idx.g <- TH.IDX[[g]] + + nlev <- lavobject@Data@ov$nlev + + #create index vector to keep track of which variable each column of + #univariateProb matrix refers to + idx.uniy <- rep(1:nvar, times=nlev) + + #create index vector to keep track of which variables each column of + #pairwiseProb matrix refers to + idx.pairs.yiyj <- combn(1:nvar,2) + no_biv_resp_cat_yiyj <- sapply(1:ncol(idx.pairs.yiyj), function(x){ + prod( nlev[idx.pairs.yiyj[,x]] ) }) + idx.y1 <- unlist( + mapply(rep, idx.pairs.yiyj[1,], each= no_biv_resp_cat_yiyj) ) + idx.y2 <- unlist( + mapply(rep, idx.pairs.yiyj[2,], each= no_biv_resp_cat_yiyj) ) + + Data <- lavobject@Data@X[[g]] + nsize <- nrow(Data) + + #create the lists of matrices + univariateProb[[g]] <- matrix(0, nrow = nsize, ncol = sum(nlev) ) + pairwiseProb[[g]] <- matrix(0, nrow = nsize, + ncol = length(lavobject@Cache[[g]]$bifreq)) + + idx.MissVar.casewise <- apply(Data, 1, function(x) { + which(is.na(x)) } ) + + for(i in 1:nsize){ + idx.MissVar <- idx.MissVar.casewise[[i]] + noMissVar <- length(idx.MissVar) + + if( noMissVar>0L ) { + #compute the denominator of the conditional probability + TH.VAR <- lapply(1:nvar, function(x) c(-Inf, TH.g[th.idx.g==x], +Inf)) + lower <- sapply(1:nvar, function(x) TH.VAR[[x]][ Data[i,x] ]) + upper <- sapply(1:nvar, function(x) TH.VAR[[x]][ Data[i,x] + 1L ]) + lower.denom <- lower[-idx.MissVar] + upper.denom <- upper[-idx.MissVar] + MEAN.i <- MEAN[-idx.MissVar] + Corhat.i <- Cor.hat.g[-idx.MissVar, -idx.MissVar, drop=FALSE] + denom <- sadmvn(lower.denom, upper.denom, mean=MEAN.i, varcov=Corhat.i)[1] + } #end of if( noMissVar>0L ) + + if( noMissVar==1L ) { #only univariate probabilities for one item + #compute the numerator + TH.MissVar <- c(-Inf, TH.g[th.idx.g==idx.MissVar], +Inf) + #for all response categories of the missing item + no.cat <- nlev[idx.MissVar] + numer <- sapply(1:no.cat, function(x){ + lower[idx.MissVar] <- TH.MissVar[x] + upper[idx.MissVar] <- TH.MissVar[x+ 1L] + sadmvn(lower, upper, mean=MEAN, 
varcov=Cor.hat.g)[1] }) + idx.columnsUni <- which(idx.uniy %in% idx.MissVar) + univariateProb[[g]][i, idx.columnsUni] <- numer / denom + } #end of if( noMissVar==1L ) + + if( noMissVar>1L ) { + #compute the bivariate probabilities and based on them + #calculate the univariate ones + + #form all possible pairs of items with missing values + idx.pairsMiss <- combn(idx.MissVar ,2) + no.pairs <- ncol(idx.pairsMiss) + for(j in 1:no.pairs ) { + idx.Missy1y2 <- idx.pairsMiss[,j] + idx.Missy1 <- idx.Missy1y2[1] + idx.Missy2 <- idx.Missy1y2[2] + idx.MissRestItems <- idx.MissVar[ !(idx.MissVar %in% idx.Missy1y2)] + TH.Missy1 <- c(-Inf, TH.g[th.idx.g==idx.Missy1], +Inf) + TH.Missy2 <- c(-Inf, TH.g[th.idx.g==idx.Missy2], +Inf) + no.cat.Missy1 <- nlev[ idx.Missy1 ] + no.cat.Missy2 <- nlev[ idx.Missy2 ] + no.bivRespCat <- no.cat.Missy1 * no.cat.Missy2 + mat_bivRespCat <- matrix(1:no.bivRespCat, nrow= no.cat.Missy1, + ncol=no.cat.Missy2) + + numer <- sapply(1:no.bivRespCat, function(x){ + idx_y1_cat <- which(mat_bivRespCat==x, arr.ind=TRUE)[1] + idx_y2_cat <- which(mat_bivRespCat==x, arr.ind=TRUE)[2] + lower[idx.Missy1y2] <- + c( TH.Missy1[idx_y1_cat], TH.Missy2[idx_y2_cat] ) + upper[idx.Missy1y2] <- + c( TH.Missy1[idx_y1_cat+1L], TH.Missy2[idx_y2_cat+1L] ) + lower.tmp <- lower + upper.tmp <- upper + MEAN.tmp <- MEAN + Cor.hat.g.tmp <- Cor.hat.g + if( length(idx.MissRestItems)>0 ){ + lower.tmp <- lower[-idx.MissRestItems] + upper.tmp <- upper[-idx.MissRestItems] + MEAN.tmp <- MEAN[-idx.MissRestItems] + Cor.hat.g.tmp <- Cor.hat.g[-idx.MissRestItems, -idx.MissRestItems] + } + sadmvn(lower.tmp, upper.tmp, + mean=MEAN.tmp, varcov=Cor.hat.g.tmp)[1] + }) + + idx.columns <- which( (idx.y1 == idx.Missy1) & + (idx.y2 == idx.Missy2) ) + tmp_biv <- numer/denom + pairwiseProb[[g]][i, idx.columns] <- tmp_biv + + #compute the univariateProb based on the above bivariate + # probabilities + if(j==1L){ + univariateProb[[g]][i, which(idx.uniy %in% idx.Missy1) ] <- + apply(mat_bivRespCat, 1, function(x){ sum( tmp_biv[x])} ) + + univariateProb[[g]][i, which(idx.uniy %in% idx.Missy2) ] <- + apply(mat_bivRespCat, 2, function(x){ sum( tmp_biv[x])} ) + } + + if(j>1L & j<noMissVar){ + univariateProb[[g]][i, which(idx.uniy %in% idx.Missy2) ] <- + apply(mat_bivRespCat, 2, function(x){ sum( tmp_biv[x])} ) + } + } #end of for(j in 1:no.pairs ) + } #end of if( noMissVar>1L ) + } #end of for(i in 1:nsize) + } # end of for(g in 1:ngroups) + + list(univariateProbGivObs = univariateProb, + pairwiseProbGivObs = pairwiseProb) +} #end of function + diff -Nru r-cran-lavaan-0.5.22/R/ctr_pml_plrt2.R r-cran-lavaan-0.5.23.1097/R/ctr_pml_plrt2.R --- r-cran-lavaan-0.5.22/R/ctr_pml_plrt2.R 2015-05-27 08:22:40.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/ctr_pml_plrt2.R 2017-01-29 14:43:27.000000000 +0000 @@ -19,8 +19,7 @@ fx <- lav_model_objective(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - lavcache = lavcache, - estimator = "PML") + lavcache = lavcache) H0.fx <- as.numeric(fx) H0.fx.group <- attr(fx, "fx.group") } else { @@ -45,8 +44,7 @@ fx <- lav_model_objective(lavmodel = fittedSat@Model, lavsamplestats = fittedSat@SampleStats, lavdata = fittedSat@Data, - lavcache = fittedSat@Cache, - estimator = "PML") + lavcache = fittedSat@Cache) SAT.fx <- as.numeric(fx) SAT.fx.group <- attr(fx, "fx.group") @@ -62,12 +60,11 @@ sample.th = computeTH(lavmodel), sample.th.idx = lavsamplestats@th.idx) - #Options$se <- lavoptions$se + Options2 <- Options + Options2$optim.method <- "none" + Options2$optim.force.converged <- TRUE fittedSat2 <- lavaan(ModelSat2, - control=list(optim.method = "none", - optim.force.converged = TRUE) , - #slotOptions = lavoptions, - slotOptions = Options, + slotOptions = Options2, slotSampleStats 
= lavsamplestats, slotData = lavdata, slotCache = lavcache) @@ -97,8 +94,8 @@ if(is.null(VCOV)) { H0.inv <- lav_model_information(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = "PML", lavcache = lavcache, - information = "observed", augmented = TRUE, inverted = TRUE) + lavcache = lavcache, information = "observed", + augmented = TRUE, inverted = TRUE) } else { H0.inv <- attr(VCOV, "E.inv") } @@ -107,12 +104,12 @@ if(is.null(VCOV)) { J0 <- lav_model_information_firstorder(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = "PML", lavcache = lavcache)[,] + lavcache = lavcache)[,] } else { # we do not get J, but J.group, FIXME? J0 <- lav_model_information_firstorder(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = "PML", lavcache = lavcache)[,] + lavcache = lavcache)[,] } # inverted Godambe information diff -Nru r-cran-lavaan-0.5.22/R/ctr_pml_plrt.R r-cran-lavaan-0.5.23.1097/R/ctr_pml_plrt.R --- r-cran-lavaan-0.5.22/R/ctr_pml_plrt.R 2016-07-20 17:14:55.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/ctr_pml_plrt.R 2017-01-29 14:42:26.000000000 +0000 @@ -19,8 +19,7 @@ fx <- lav_model_objective(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - lavcache = lavcache, - estimator = "PML") + lavcache = lavcache) H0.fx <- as.numeric(fx) H0.fx.group <- attr(fx, "fx.group") } else { @@ -45,8 +44,7 @@ fx <- lav_model_objective(lavmodel = fittedSat@Model, lavsamplestats = fittedSat@SampleStats, lavdata = fittedSat@Data, - lavcache = fittedSat@Cache, - estimator = "PML") + lavcache = fittedSat@Cache) SAT.fx <- as.numeric(fx) SAT.fx.group <- attr(fx, "fx.group") @@ -62,14 +60,13 @@ sample.th = computeTH(lavmodel), sample.th.idx = lavsamplestats@th.idx) - #Options$se <- lavoptions$se + Options2 <- Options + Options2$optim.method <- "none" + Options2$optim.force.converged <- TRUE fittedSat2 <- lavaan(ModelSat2, - control=list(optim.method = "none", - optim.force.converged = TRUE) , - #slotOptions = lavoptions, - slotOptions = Options, - slotSampleStats = lavsamplestats, - slotData = lavdata, slotCache = lavcache) + slotOptions = Options2, + slotSampleStats = lavsamplestats, + slotData = lavdata, slotCache = lavcache) # the code below was contributed by Myrsini Katsikatsou (Jan 2015) diff -Nru r-cran-lavaan-0.5.22/R/lav_bootstrap.R r-cran-lavaan-0.5.23.1097/R/lav_bootstrap.R --- r-cran-lavaan-0.5.22/R/lav_bootstrap.R 2016-07-20 17:16:08.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_bootstrap.R 2017-01-29 15:12:22.000000000 +0000 @@ -53,11 +53,14 @@ stop("lavaan ERROR: this function is not (yet) available if conditional.x = TRUE") } + lavoptions. <- list(parallel = parallel, ncpus = ncpus, cl = cl, + iseed = iseed) + bootstrap.internal(object = object, lavdata. = NULL, lavmodel. = NULL, lavsamplestats. = NULL, - lavoptions. = NULL, + lavoptions. = lavoptions., lavpartable. = NULL, R = R, type = type., @@ -65,10 +68,6 @@ FUN = FUN, warn = warn, return.boot = return.boot, - parallel = parallel, - ncpus = ncpus, - cl = cl, - iseed = iseed, h0.rmsea = h0.rmsea, ...) } @@ -87,10 +86,6 @@ FUN = "coef", warn = 0L, return.boot = FALSE, - parallel = c("no", "multicore", "snow"), - ncpus = 1L, - cl = NULL, - iseed = NULL, h0.rmsea = NULL, ...) 
{ @@ -130,13 +125,15 @@ } } + parallel <- lavoptions$parallel + ncpus <- lavoptions$ncpus + cl <- lavoptions$cl + iseed <- lavoptions$iseed + # prepare old_options <- options(); options(warn = warn) - if (missing(parallel)) parallel <- "no" # the next 10 lines are borrowed from the boot package - parallel <- match.arg(parallel) have_mc <- have_snow <- FALSE - ncpus <- ncpus if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") have_mc <- .Platform$OS.type != "windows" else if (parallel == "snow") have_snow <- TRUE diff -Nru r-cran-lavaan-0.5.22/R/lav_cor.R r-cran-lavaan-0.5.23.1097/R/lav_cor.R --- r-cran-lavaan-0.5.22/R/lav_cor.R 2016-07-12 17:25:45.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_cor.R 2017-02-02 07:42:46.000000000 +0000 @@ -52,7 +52,7 @@ lav.data <- lavData(data = object, group = group, ov.names = NAMES, ordered = ordered, ov.names.x = ov.names.x, - missing = missing) + lavoptions = list(missing = missing)) } else { stop("lavaan ERROR: lavCor can not handle objects of class ", paste(class(object), collapse= " ")) @@ -131,7 +131,8 @@ out <- inspect(fit, "sampstat") } else if(output %in% c("parameterEstimates", "pe", "parameterestimates", "est")) { - out <- parameterEstimates(fit) + #out <- parameterEstimates(fit) + out <- standardizedSolution(fit) } else { out <- fit } diff -Nru r-cran-lavaan-0.5.22/R/lav_data.R r-cran-lavaan-0.5.23.1097/R/lav_data.R --- r-cran-lavaan-0.5.22/R/lav_data.R 2016-07-19 11:39:02.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_data.R 2017-02-24 10:20:40.000000000 +0000 @@ -9,22 +9,59 @@ # # initial version: YR 14 April 2012 +# YR 23 feb 2017: blocks/levels/groups, but everything is group-based! + +# FIXME: if nlevels > 1L, and ngroups > 1L, we should check that +# group is at the upper-level + # extract the data we need for this particular model lavData <- function(data = NULL, # data.frame group = NULL, # multiple groups? - group.label = NULL, # custom group labels? + cluster = NULL, # clusters? ov.names = NULL, # variables needed in model - ordered = NULL, # ordered variables ov.names.x = character(0), # exo variables - std.ov = FALSE, # standardize ov's? - missing = "listwise", # remove missings? + ordered = NULL, # ordered variables sample.cov = NULL, # sample covariance(s) sample.mean = NULL, # sample mean vector(s) sample.nobs = NULL, # sample nobs - warn = TRUE, # produce warnings? + + lavoptions = lavOptions(), # lavoptions allow.single.case = FALSE # allow single case (for newdata in predict) ) { + + # get info from lavoptions + + # group.labels + group.label <- lavoptions$group.label + if(is.null(group.label)) { + group.label <- character(0L) + } + + # level.labels + level.label <- lavoptions$level.label + if(is.null(level.label)) { + level.label <- character(0L) + } + + # std.ov? + std.ov <- lavoptions$std.ov + if(is.null(std.ov)) { + std.ov <- FALSE + } + + # missing? + missing <- lavoptions$missing + if(is.null(missing) || missing == "default") { + missing <- "listwise" + } + + # warn? + warn <- lavoptions$warn + if(is.null(warn)) { + warn <- TRUE + } + # four scenarios: # 0) data is already a lavData object: do nothing # 1) data is full data.frame (or a matrix) @@ -64,7 +101,9 @@ lavData <- lav_data_full(data = data, group = group, + cluster = cluster, group.label = group.label, + level.label = level.label, ov.names = ov.names, ordered = ordered, ov.names.x = ov.names.x, @@ -78,18 +117,14 @@ # 2) sample moments if(is.null(data) && !is.null(sample.cov)) { + + # for now: no levels!! 
+ nlevels <- 1L # we also need the number of observations (per group) if(is.null(sample.nobs)) stop("lavaan ERROR: please specify number of observations") - # if meanstructure=TRUE, we need sample.mean - #if(meanstructure == TRUE && is.null(sample.mean)) - # stop("lavaan ERROR: please provide sample.mean if meanstructure=TRUE") - # if group.equal contains "intercepts", we need sample.mean - #if("intercepts" %in% group.equal && is.null(sample.mean)) - # stop("lavaan ERROR: please provide sample.mean if group.equal contains \"intercepts\"") - # list? if(is.list(sample.cov)) { # multiple groups, multiple cov matrices @@ -99,7 +134,7 @@ # multiple groups, multiple cov matrices ngroups <- length(sample.cov) LABEL <- names(sample.cov) - if(is.null(group.label)) { + if(is.null(group.label) || length(group.label) == 0L) { if(is.null(LABEL)) group.label <- paste("Group ", 1:ngroups, sep="") else @@ -124,7 +159,7 @@ ov.names <- lapply(sample.cov, row.names) } else if (!is.list(ov.names)) { # duplicate ov.names for each group - tmp <- ov.names; ov.names <- vector("list", length=ngroups) + tmp <- ov.names; ov.names <- vector("list", length = ngroups) ov.names[1:ngroups] <- list(tmp) } else { if (length(ov.names) != ngroups) @@ -135,7 +170,7 @@ # handle ov.names.x if(!is.list(ov.names.x)) { - tmp <- ov.names.x; ov.names.x <- vector("list", length=ngroups) + tmp <- ov.names.x; ov.names.x <- vector("list", length = ngroups) ov.names.x[1:ngroups] <- list(tmp) } else { if(length(ov.names.x) != ngroups) @@ -144,7 +179,7 @@ } ov <- list() - ov$name <- unique(unlist(c(ov.names,ov.names.x))) + ov$name <- unique( unlist(c(ov.names, ov.names.x)) ) nvar <- length(ov$name) ov$idx <- rep(NA, nvar) ov$nobs <- rep(sample.nobs, nvar) @@ -160,32 +195,42 @@ data.type = "moment", ngroups = ngroups, group = character(0L), + nlevels = 1L, # for now + cluster = character(0L), group.label = group.label, + level.label = character(0L), nobs = as.list(sample.nobs), norig = as.list(sample.nobs), ov.names = ov.names, ov.names.x = ov.names.x, + ordered = as.character(ordered), ov = ov, std.ov = FALSE, missing = "listwise", - case.idx = vector("list", length=ngroups), - Mp = vector("list", length=ngroups), - Rp = vector("list", length=ngroups), - X = vector("list", length=ngroups), - eXo = vector("list", length=ngroups) + case.idx = vector("list", length = ngroups), + Mp = vector("list", length = ngroups), + Rp = vector("list", length = ngroups), + Lp = vector("list", length = ngroups), + X = vector("list", length = ngroups), + eXo = vector("list", length = ngroups) ) } # 3) data.type = "none": both data and sample.cov are NULL if(is.null(data) && is.null(sample.cov)) { + + # no levels + nlevels <- 1L + if(is.null(sample.nobs)) sample.nobs <- 0L sample.nobs <- as.list(sample.nobs) ngroups <- length(unlist(sample.nobs)) - if(ngroups > 1L) + if(ngroups > 1L) { group.label <- paste("Group ", 1:ngroups, sep="") - else + } else { group.label <- character(0) + } # handle ov.names if(is.null(ov.names)) { @@ -193,7 +238,7 @@ ov.names <- character(0L) } if(!is.list(ov.names)) { - tmp <- ov.names; ov.names <- vector("list", length=ngroups) + tmp <- ov.names; ov.names <- vector("list", length = ngroups) ov.names[1:ngroups] <- list(tmp) } else { if (length(ov.names) != ngroups) @@ -203,12 +248,12 @@ } # handle ov.names.x if(!is.list(ov.names.x)) { - tmp <- ov.names.x; ov.names.x <- vector("list", length=ngroups) + tmp <- ov.names.x; ov.names.x <- vector("list", length = ngroups) ov.names.x[1:ngroups] <- list(tmp) } ov <- list() - ov$name <- 
unique(unlist(c(ov.names,ov.names.x))) + ov$name <- unique( unlist(c(ov.names, ov.names.x)) ) nvar <- length(ov$name) ov$idx <- rep(NA, nvar) ov$nobs <- rep(0L, nvar) @@ -219,18 +264,23 @@ data.type = "none", ngroups = ngroups, group = character(0L), + nlevels = 1L, # for now + cluster = character(0L), group.label = group.label, + level.label = character(0L), nobs = sample.nobs, norig = sample.nobs, ov.names = ov.names, ov.names.x = ov.names.x, + ordered = as.character(ordered), ov = ov, missing = "listwise", - case.idx = vector("list", length=ngroups), - Mp = vector("list", length=ngroups), - Rp = vector("list", length=ngroups), - X = vector("list", length=ngroups), - eXo = vector("list", length=ngroups) + case.idx = vector("list", length = ngroups), + Mp = vector("list", length = ngroups), + Rp = vector("list", length = ngroups), + Lp = vector("list", length = ngroups), + X = vector("list", length = ngroups), + eXo = vector("list", length = ngroups) ) } @@ -241,7 +291,9 @@ # handle full data lav_data_full <- function(data = NULL, # data.frame group = NULL, # multiple groups? + cluster = NULL, group.label = NULL, # custom group labels? + level.label = NULL, ov.names = NULL, # variables needed # in model ordered = NULL, # ordered variables @@ -294,7 +346,48 @@ group <- character(0L) } - # ov.names + # cluster + # number of levels and level labels + if(!is.null(cluster) && length(cluster) > 0L) { + # cluster variable in data? + if(!all(cluster %in% names(data))) { + # which one did we not find? + not.ok <- which(!cluster %in% names(data)) + + stop("lavaan ERROR: cluster variable(s) ", sQuote(cluster[not.ok]), + " not found;\n ", + "variable names found in data frame are:\n ", + paste(names(data), collapse = " ")) + } + # default level.labels + if(length(level.label) == 0L) { + level.label <- c("within", cluster) + } else { + # check if length(level.label) = 1 + length(cluster) + if(length(level.label) != length(cluster) + 1L) { + stop("lavaan ERROR: length(level.label) != length(cluster) + 1L") + } + # nothing to do + } + # check for missing values in cluster variable(s) + for(cl in 1:length(cluster)) { + if(warn && any(is.na(data[[cluster[cl]]]))) { + warning("lavaan WARNING: cluster variable ", + sQuote(cluster[cl]), + " contains missing values\n", sep = "") + } + } + nlevels <- length(level.label) + } else { + if(warn && length(level.label) > 0L) + warning("lavaan WARNING: `level.label' argument", + " will be ignored if `cluster' argument is missing") + nlevels <- 1L + level.label <- character(0L) + cluster <- character(0L) + } + + # ov.names (still needed???) 
if(is.null(ov.names)) { ov.names <- names(data) # remove 'group' name from ov.names @@ -302,8 +395,14 @@ group.idx <- which(ov.names == group) ov.names <- ov.names[-group.idx] } + # remove 'cluster' names from ov.names + if(length(cluster) > 0L) { + cluster.idx <- which(ov.names %in% cluster) + ov.names <- ov.names[-cluster.idx] + } } + # check ov.names vs ngroups if(ngroups > 1L) { if(is.list(ov.names)) { if(length(ov.names) != ngroups) @@ -311,7 +410,7 @@ " groups; data contains ", ngroups, " groups") } else { tmp <- ov.names - ov.names <- vector("list", length=ngroups) + ov.names <- vector("list", length = ngroups) ov.names[1:ngroups] <- list(tmp) } if(is.list(ov.names.x)) { @@ -320,7 +419,7 @@ " groups; data contains ", ngroups, " groups") } else { tmp <- ov.names.x - ov.names.x <- vector("list", length=ngroups) + ov.names.x <- vector("list", length = ngroups) ov.names.x[1:ngroups] <- list(tmp) } } else { @@ -338,7 +437,7 @@ } } - # construct OV list -- FIXME: surely, this can be done more elegantly?? + # check if all ov.names can be found in the data.frame for(g in 1:ngroups) { # does the data contain all the observed variables # needed in the user-specified model for this group @@ -374,7 +473,7 @@ # here, we know for sure all ov.names exist in the data.frame # create varTable - # FIXME: should we add the 'group' variable (no for now) + # FIXME: should we add the 'group'/'cluster' variable (no for now) ov <- lav_dataframe_vartable(frame = data, ov.names = ov.names, ov.names.x = ov.names.x, ordered = ordered, as.data.frame. = FALSE) @@ -440,16 +539,20 @@ warning("lavaan WARNING: all observed variables are exogenous; model may not be identified") } - # prepare empty list for data.matrix per group - case.idx <- vector("list", length=ngroups) - nobs <- vector("list", length=ngroups) - norig <- vector("list", length=ngroups) - Mp <- vector("list", length=ngroups) - X <- vector("list", length=ngroups) - eXo <- vector("list", length=ngroups) - Rp <- vector("list", length=ngroups) + # prepare empty lists + + # group-based + case.idx <- vector("list", length = ngroups) + Mp <- vector("list", length = ngroups) + Rp <- vector("list", length = ngroups) + norig <- vector("list", length = ngroups) + nobs <- vector("list", length = ngroups) + X <- vector("list", length = ngroups) + eXo <- vector("list", length = ngroups) + Lp <- vector("list", length = ngroups) + - # for each group + # collect information per upper-level group for(g in 1:ngroups) { # extract variables in correct order @@ -461,7 +564,7 @@ if(ngroups > 1L || length(group.label) > 0L) { if(missing == "listwise") { case.idx[[g]] <- which(data[[group]] == group.label[g] & - complete.cases(data[all.idx])) + complete.cases(data[all.idx])) nobs[[g]] <- length(case.idx[[g]]) norig[[g]] <- length(which(data[[group]] == group.label[g])) } else if(missing == "pairwise" && length(exo.idx) > 0L) { @@ -489,15 +592,14 @@ } # extract data - X[[g]] <- data.matrix( data[case.idx[[g]], ov.idx, drop=FALSE] ) + X[[g]] <- data.matrix( data[case.idx[[g]], ov.idx, drop = FALSE] ) dimnames(X[[g]]) <- NULL ### copy? - # manually construct integers for user-declared 'ordered' factors + # construct integers for user-declared 'ordered' factors # FIXME: is this really (always) needed??? 
- # (but it is still better than doing lapply(data[,idx], ordered) which + # (but still better than doing lapply(data[,idx], ordered) which # generated even more copies) - user.ordered.names <- ov$name[ov$type == "ordered" & - ov$user == 1L] + user.ordered.names <- ov$name[ov$type == "ordered" & ov$user == 1L] user.ordered.idx <- which(ov.names[[g]] %in% user.ordered.names) if(length(user.ordered.idx) > 0L) { for(i in user.ordered.idx) { @@ -508,18 +610,18 @@ ## FIXME: ## - why also in X? (for samplestats, for now) if(length(exo.idx) > 0L) { - eXo[[g]] <- data.matrix( data[case.idx[[g]], exo.idx, drop=FALSE] ) + eXo[[g]] <- data.matrix(data[case.idx[[g]], exo.idx, drop = FALSE]) dimnames(eXo[[g]]) <- NULL } else { eXo[g] <- list(NULL) } - #print( tracemem(X[[g]]) ) # standardize observed variables? numeric only! if(std.ov) { num.idx <- which(ov.names[[g]] %in% ov$name & ov$type == "numeric") if(length(num.idx) > 0L) { - X[[g]][,num.idx] <- scale(X[[g]][,num.idx,drop=FALSE])[,,drop = FALSE] + X[[g]][,num.idx] <- + scale(X[[g]][,num.idx,drop = FALSE])[,,drop = FALSE] # three copies are made!!!!! } if(length(exo.idx) > 0L) { @@ -530,26 +632,14 @@ # missing data if(missing != "listwise") { # get missing patterns - Mp[[g]] <- lav_data_missing_patterns(X[[g]], sort.freq = TRUE, - coverage = TRUE) + Mp[[g]] <- lav_data_missing_patterns(X[[g]], + sort.freq = TRUE, coverage = TRUE) # checking! if(length(Mp[[g]]$empty.idx) > 0L) { - #X[[g]] <- X[[g]][-Mp[[g]]$empty.idx,,drop=FALSE] - # remove from case.idx - # idx <- which(case.idx[[g]] %in% Mp[[g]]$empty.idx) - #empty.idx <- Mp[[g]]$empty.idx - #empty.case.idx <- case.idx[[g]][empty.idx] empty.case.idx <- Mp[[g]]$empty.idx - #case.idx[[g]] <- case.idx[[g]][-empty.idx] - # remove from eXo - #if(length(exo.idx) > 0L) { - # eXo[[g]] <- eXo[[g]][-empty.idx,,drop=FALSE] - #} if(warn) { warning("lavaan WARNING: some cases are empty and will be ignored:\n ", paste(empty.case.idx, collapse=" ")) } - # give empty.idx case.idx? (for multiple groups): - #Mp[[g]]$empty.idx <- empty.case.idx } if(warn && any(Mp[[g]]$coverage < 0.1)) { warning("lavaan WARNING: due to missing values, some pairwise combinations have less than 10% coverage") @@ -573,28 +663,41 @@ "\n nobs = ", nobs[[g]], " nvar = ", nvar) } - } # ngroups + # cluster information + if(nlevels > 1L) { + # extract cluster variable(s), for this group + clus <- data.matrix(data[case.idx[[g]], cluster]) + Lp[[g]] <- lav_data_cluster_patterns(Y = X[[g]], clus = clus, + cluster = cluster) + } + + } # groups, at first level lavData <- new("lavData", - data.type = "full", - ngroups = ngroups, - group = group, - group.label = group.label, - std.ov = std.ov, - nobs = nobs, - norig = norig, - ov.names = ov.names, - ov.names.x = ov.names.x, - #ov.types = ov.types, - #ov.idx = ov.idx, - ov = ov, - case.idx = case.idx, - missing = missing, - X = X, - eXo = eXo, - Mp = Mp, - Rp = Rp - ) + data.type = "full", + ngroups = ngroups, + group = group, + nlevels = nlevels, + cluster = cluster, + group.label = group.label, + level.label = level.label, + std.ov = std.ov, + nobs = nobs, + norig = norig, + ov.names = ov.names, + ov.names.x = ov.names.x, + #ov.types = ov.types, + #ov.idx = ov.idx, + ordered = as.character(ordered), + ov = ov, + case.idx = case.idx, + missing = missing, + X = X, + eXo = eXo, + Mp = Mp, + Rp = Rp, + Lp = Lp + ) lavData } @@ -662,34 +765,34 @@ } # get response patterns (ignore empty cases!) 
-lav_data_resp_patterns <- function(X) { +lav_data_resp_patterns <- function(Y) { # construct TRUE/FALSE matrix: TRUE if value is observed - OBS <- !is.na(X) + OBS <- !is.na(Y) # empty cases empty.idx <- which(rowSums(OBS) == 0L) # remove empty cases if(length(empty.idx) > 0L) { - X <- X[-empty.idx,,drop = FALSE] + Y <- Y[-empty.idx,,drop = FALSE] } - ntotal <- nrow(X); nvar <- ncol(X) + ntotal <- nrow(Y); nvar <- ncol(Y) # identify, label and sort response patterns - id <- apply(X, MARGIN = 1, paste, collapse = "") + id <- apply(Y, MARGIN = 1, paste, collapse = "") # sort patterns (from high occurrence to low occurrence) TABLE <- sort(table(id), decreasing = TRUE) order <- names(TABLE) npatterns <- length(TABLE) - pat <- X[match(order, id), , drop = FALSE] + pat <- Y[match(order, id), , drop = FALSE] row.names(pat) <- as.character(TABLE) # handle NA? - X[is.na(X)] <- -9 - total.patterns <- prod(apply(X, 2, function(x) length(unique(x)))) + Y[is.na(Y)] <- -9 + total.patterns <- prod(apply(Y, 2, function(x) length(unique(x)))) empty.patterns <- total.patterns - npatterns # return a list #out <- list(nobs=ntotal, nvar=nvar, @@ -702,6 +805,33 @@ out } + +# get cluster information +# - cluster can be a vector! +# - clus can contain multiple columns! +lav_data_cluster_patterns <- function(Y = NULL, clus = NULL, cluster = NULL) { + + # how many levels? + nlevels <- length(cluster) + + # check clus + stopifnot(ncol(clus) == nlevels, nrow(Y) == nrow(clus)) + + cluster.size <- vector("list", length = nlevels) + cluster.id <- vector("list", length = nlevels) + nclusters <- vector("list", length = nlevels) + # for each clustering variable + for(l in 1:nlevels) { + cluster.size[[l]] <- as.integer(table(clus[,l])) + cluster.id[[l]] <- unique(clus[,l]) + nclusters[[l]] <- length(cluster.size[[l]]) + } + + out <- list(cluster = cluster, clus = clus, nclusters = nclusters, + cluster.size = cluster.size, cluster.id = cluster.id) + + out +} setMethod("show", "lavData", function(object) { diff -Nru r-cran-lavaan-0.5.22/R/lav_export_bugs.R r-cran-lavaan-0.5.23.1097/R/lav_export_bugs.R --- r-cran-lavaan-0.5.22/R/lav_export_bugs.R 2015-02-06 08:31:08.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_export_bugs.R 2017-02-21 10:50:43.000000000 +0000 @@ -5,7 +5,7 @@ # get parameter table attributes pta <- lav_partable_attributes(partable = partable, pta = pta) - vnames <- pta$vnames; ngroups <- pta$ngroups + vnames <- pta$vnames; nblocks <- pta$nblocks nvar <- pta$nvar; nfac <- pta$nfac # sanity check @@ -25,13 +25,13 @@ } # model for every i - for(g in 1:ngroups) { + for(g in 1:nblocks) { ov.names <- vnames$ov[[g]] lv.names <- vnames$lv[[g]] yname <- paste("y", g, sep="") - if(ngroups > 1L) { + if(nblocks > 1L) { TXT <- paste(TXT, t1, - "# group ", g, "\n", sep="") + "# block ", g, "\n", sep="") } else { TXT <- paste(TXT, "\n") } @@ -44,7 +44,7 @@ "# ov.nox", sep="") for(i in 1:nov) { ov.idx <- match(ov.names.nox[i], ov.names) - theta.free.idx <- which(partable$group == g & + theta.free.idx <- which(partable$block == g & partable$op == "~~" & partable$lhs == partable$rhs & partable$lhs == ov.names.nox[i]) @@ -68,7 +68,7 @@ # find rhs for this observed variable # 1. intercept? - int.idx <- which(partable$group == g & + int.idx <- which(partable$block == g & partable$op == "~1" & partable$lhs == ov.names.nox[i]) if(length(int.idx) == 1L) { @@ -85,7 +85,7 @@ } # 2. factor loading?
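# Example (sketch): the export code above and below repeatedly filters the
# parameter table -- a named list of parallel vectors -- by block/op/lhs/rhs;
# a minimal stand-in partable (toy values, not a real lavaan partable):
# partable <- list(block = c(1L, 1L), op  = c("=~", "~~"),
#                  lhs   = c("f", "y1"), rhs = c("y1", "y1"),
#                  free  = c(1L, 2L))
# lam.idx <- which(partable$block == 1L & partable$op == "=~" &
#                  partable$rhs == "y1")
# partable$free[lam.idx]   # 1 -> index of the free loading f =~ y1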
- lam.idx <- which(partable$group == g & + lam.idx <- which(partable$block == g & partable$op == "=~" & partable$rhs == ov.names.nox[i]) for(j in lam.idx) { @@ -104,7 +104,7 @@ } # 3. regression? - r.idx <- which(partable$group == g & + r.idx <- which(partable$block == g & partable$op == "~" & partable$lhs == ov.names.nox[i]) for(j in r.idx) { @@ -138,7 +138,7 @@ "# lv.y", sep="") lv.y.idx <- match(lv.y, lv.names); ny <- length(lv.y.idx) for(j in 1:ny) { - theta.free.idx <- which(partable$group == g & + theta.free.idx <- which(partable$block == g & partable$op == "~~" & partable$lhs == partable$rhs & partable$lhs == lv.y[j]) @@ -160,7 +160,7 @@ # lhs elements regression # 1. intercept? - int.idx <- which(partable$group == g & + int.idx <- which(partable$block == g & partable$op == "~1" & partable$lhs == lv.y[j]) if(length(int.idx) == 1L) { @@ -176,7 +176,7 @@ TXT <- paste(TXT, "0", sep="") } - rhs.idx <- which(partable$group == g & + rhs.idx <- which(partable$block == g & partable$op == "~" & partable$lhs == lv.y[j]) np <- length(rhs.idx) @@ -212,9 +212,9 @@ # exogenous ov ??? (what to do here?) - # end of this group + # end of this block TXT <- paste(TXT, "\n\n", t1, - "} # end of group ", g, "\n", sep="") + "} # end of block ", g, "\n", sep="") } # priors (both fixed and free) @@ -224,7 +224,7 @@ for(i in seq_len(npt)) { if(partable$free[i] == 0L) next # skip non-free parameters lhs <- partable$lhs[i]; op <- partable$op[i]; rhs <- partable$rhs[i] - free.idx <- partable$free[i]; g <- partable$group[i] + free.idx <- partable$free[i]; g <- partable$block[i] if(op == "=~") { # factor loading TXT <- paste(TXT, "\n", t1, @@ -287,8 +287,8 @@ TXT <- paste(TXT, "\n\n", t1, "# Priors free parameters (multivariate):", sep="") - for(g in 1:ngroups) { - lv.phi.idx <- which(partable$group == g & + for(g in 1:nblocks) { + lv.phi.idx <- which(partable$block == g & partable$op == "~~" & partable$lhs %in% vnames$lv.x[[g]] & partable$rhs %in% vnames$lv.x[[g]]) diff -Nru r-cran-lavaan-0.5.22/R/lav_export_mplus.R r-cran-lavaan-0.5.23.1097/R/lav_export_mplus.R --- r-cran-lavaan-0.5.22/R/lav_export_mplus.R 2016-07-18 12:18:25.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_export_mplus.R 2017-02-20 15:51:57.000000000 +0000 @@ -6,7 +6,7 @@ footer <- "\n" lav <- as.data.frame(lav, stringsAsFactors=FALSE) - ngroups <- max(lav$group) + ngroups <- lav_partable_ngroups(lav) lav_one_group <- function(lav) { @@ -98,7 +98,7 @@ # group 1 body <- lav_one_group(lav[lav$group == 1,]) - if(is.null(group.label)) { + if(is.null(group.label) || length(group.label) == 0L) { group.label <- paste(1:ngroups) } diff -Nru r-cran-lavaan-0.5.22/R/lav_fit_measures.R r-cran-lavaan-0.5.23.1097/R/lav_fit_measures.R --- r-cran-lavaan-0.5.22/R/lav_fit_measures.R 2016-09-24 12:59:22.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_fit_measures.R 2017-02-20 19:03:55.000000000 +0000 @@ -10,7 +10,7 @@ baseline.model = baseline.model) }) -lav_fit_measures <- function(object, fit.measures="all", +lav_fit_measures <- function(object, fit.measures="all", baseline.model = NULL) { # has the model converged? 
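# Example (sketch): the new baseline.model argument lets the user supply an
# already-fitted baseline for the incremental fit indices; the model syntax and
# data below are illustrative only (HolzingerSwineford1939 ships with lavaan,
# and lav_object_independence is the internal shown further down in this hunk):
# fit  <- cfa('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939)
# base <- lavaan:::lav_object_independence(fit)  # the default baseline, made explicit
# fitMeasures(fit, c("cfi", "tli"), baseline.model = base)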
@@ -43,7 +43,7 @@ # N versus N-1 # this affects BIC, RMSEA, cn_01/05, MFI and ECVI # Changed 0.5-15: suggestion by Mark Seeto - if(object@Options$estimator %in% c("ML","PML","FML") && + if(object@Options$estimator %in% c("ML","PML","FML") && object@Options$likelihood == "normal") { N <- object@SampleStats@ntotal } else { @@ -80,10 +80,10 @@ } } #if(is.na(X2) && is.finite(fx)) { - # + # #} - - if(test %in% c("satorra.bentler", "yuan.bentler", + + if(test %in% c("satorra.bentler", "yuan.bentler", "mean.var.adjusted", "scaled.shifted")) { scaled <- TRUE } else { @@ -102,14 +102,14 @@ # basic chi-square test fit.chisq <- c("fmin", "chisq", "df", "pvalue") if(scaled) { - fit.chisq <- c(fit.chisq, "chisq.scaled", "df.scaled", "pvalue.scaled", + fit.chisq <- c(fit.chisq, "chisq.scaled", "df.scaled", "pvalue.scaled", "chisq.scaling.factor") } # baseline model fit.baseline <- c("baseline.chisq", "baseline.df", "baseline.pvalue") if(scaled) { - fit.baseline <- c(fit.baseline, "baseline.chisq.scaled", + fit.baseline <- c(fit.baseline, "baseline.chisq.scaled", "baseline.df.scaled", "baseline.pvalue.scaled", "baseline.chisq.scaling.factor") } @@ -124,14 +124,14 @@ # more incremental fit indices fit.incremental <- c("cfi", "tli", "nnfi", "rfi", "nfi", "pnfi", "ifi", "rni") - if(scaled) { - fit.incremental <- c(fit.incremental, "cfi.scaled", "tli.scaled", + if(scaled) { + fit.incremental <- c(fit.incremental, "cfi.scaled", "tli.scaled", "cfi.robust", "tli.robust", "nnfi.scaled", "nnfi.robust", "rfi.scaled", "nfi.scaled", "ifi.scaled", "rni.scaled", "rni.robust") } - + # likelihood based measures if(estimator == "MML") { fit.logl <- c("logl", "aic", "bic", "ntotal", "bic2") @@ -162,7 +162,7 @@ "srmr_mplus", "srmr_mplus_nomean") } else { fit.srmr <- c("srmr") - fit.srmr2 <- c("rmr", "rmr_nomean", + fit.srmr2 <- c("rmr", "rmr_nomean", "srmr", # the default "srmr_bentler", "srmr_bentler_nomean", "srmr_bollen", "srmr_bollen_nomean", @@ -194,14 +194,14 @@ if(length(fit.measures) == 1L) { if(fit.measures == "default") { if(estimator == "ML" || estimator == "PML") { - fit.measures <- c(fit.always, fit.chisq, fit.baseline, - fit.cfi.tli, fit.logl, + fit.measures <- c(fit.always, fit.chisq, fit.baseline, + fit.cfi.tli, fit.logl, fit.rmsea, fit.srmr) } else if(estimator == "MML") { fit.measures <- c(fit.always, fit.logl) } else { - fit.measures <- c(fit.always, - fit.chisq, fit.baseline, fit.cfi.tli, + fit.measures <- c(fit.always, + fit.chisq, fit.baseline, fit.cfi.tli, fit.rmsea, fit.srmr, fit.table) if(object@Options$mimic == "Mplus") { fit.measures <- c(fit.measures, "wrmr") @@ -210,7 +210,7 @@ } else if(fit.measures == "all") { if(estimator == "ML") { fit.measures <- c(fit.always, - fit.chisq, fit.baseline, fit.incremental, + fit.chisq, fit.baseline, fit.incremental, fit.logl, fit.rmsea, fit.srmr2, fit.other) } else { fit.measures <- c(fit.always, @@ -228,14 +228,14 @@ } # Chi-square value estimated model (H0) - if(any(c("fmin", "chisq", "chisq.scaled", + if(any(c("fmin", "chisq", "chisq.scaled", "chisq.scaling.factor") %in% fit.measures)) { indices["fmin"] <- fx indices["chisq"] <- X2 if(scaled) { indices["chisq.scaled"] <- X2.scaled indices["chisq.scaling.factor"] <- TEST[[2]]$scaling.factor - } + } } if(any(c("df", "df.scaled") %in% fit.measures)) { indices["df"] <- df @@ -251,25 +251,44 @@ } - if(any(c("cfi", "cfi.scaled", "cfi.robust", + if(any(c("cfi", "cfi.scaled", "cfi.robust", "tli", "tli.scaled", "tli.robust", - "nnfi", "nnfi.scaled", "nnfi.robust", + "nnfi", "nnfi.scaled", "nnfi.robust", "pnfi",
"pnfi.scaled", "rfi", "rfi.scaled", "nfi", "nfi.scaled", "ifi", "ifi.scaled", "rni", "rni.scaled", "rni.robust", "baseline.chisq", "baseline.chisq.scaled", "baseline.pvalue", "baseline.pvalue.scaled") %in% fit.measures)) { - + # call explicitly independence model # this is not strictly needed for ML, but it is for # GLS and WLS # and MLM and MLR to get the scaling factor(s)! - if (!is.null(baseline.model) & is(baseline.model, "lavaan")) { + if (!is.null(baseline.model) && is(baseline.model, "lavaan")) { fit.indep <- baseline.model + } else if (!is.null(object@external$baseline.model) && + is(object@external$baseline.model, "lavaan")) { + fit.indep <- object@external$baseline.model + ## check baseline converged + if (!fit.indep@optim$converged) { + fit.indep <- NULL + } else { + ## check test matches and baseline converged + sameTest <- ( object@Options$test == fit.indep@Options$test ) + sameSE <- ( object@Options$se == fit.indep@Options$se ) + sameEstimator <- ( object@Options$estimator == fit.indep@Options$estimator ) + if (!all(sameTest, sameSE, sameEstimator)) { + fit.indep <- try(update(fit.indep, + test = object@Options$test, + se = object@Options$se, + estimator = object@Options$estimator), + silent = TRUE) + } + } } else { fit.indep <- try(lav_object_independence(object), silent = TRUE) } - + if(inherits(fit.indep, "try-error")) { X2.null <- df.null <- as.numeric(NA) if(scaled) { @@ -281,7 +300,7 @@ if(scaled) { X2.null.scaled <- fit.indep@test[[2]]$stat df.null.scaled <- fit.indep@test[[2]]$df - } + } } # check for NAs @@ -293,7 +312,7 @@ indices["baseline.chisq"] <- X2.null if(scaled) { indices["baseline.chisq.scaled"] <- X2.null.scaled - } + } } if("baseline.df" %in% fit.measures) { indices["baseline.df"] <- df.null @@ -304,7 +323,7 @@ if("baseline.pvalue" %in% fit.measures) { indices["baseline.pvalue"] <- fit.indep@test[[1]]$pvalue if(scaled) { - indices["baseline.pvalue.scaled"] <- + indices["baseline.pvalue.scaled"] <- fit.indep@test[[2]]$pvalue } } @@ -313,7 +332,7 @@ fit.indep@test[[2]]$scaling.factor } - # CFI - comparative fit index (Bentler, 1990) + # CFI - comparative fit index (Bentler, 1990) if("cfi" %in% fit.measures) { t1 <- max( c(X2 - df, 0) ) t2 <- max( c(X2 - df, X2.null - df.null, 0) ) @@ -338,11 +357,11 @@ if("cfi.robust" %in% fit.measures) { if(TEST[[2]]$test %in% c("satorra.bentler", "yuan.bentler")) { - + # see Brosseau-Liard & Savalei MBR 2014, equation 15 # what to do if X2 = 0 and df = 0? in this case, - # the scaling factor (ch) will be NA, and we get NA + # the scaling factor (ch) will be NA, and we get NA # (instead of 1) if(X2 < .Machine$double.eps && df == 0) { ch <- 0 @@ -361,7 +380,7 @@ indices["cfi.robust"] <- 1 - t1/t2 } } else { - indices["cfi.robust"] <- NA + indices["cfi.robust"] <- NA } } @@ -396,7 +415,7 @@ # see Brosseau-Liard & Savalei MBR 2014, equation 15 # what to do if X2 = 0 and df = 0? 
in this case, - # the scaling factor (ch) will be NA, and we get NA + # the scaling factor (ch) will be NA, and we get NA # (instead of 1) if(X2 < .Machine$double.eps && df == 0) { ch <- 0 @@ -435,7 +454,7 @@ # note: TLI original formula was in terms of fx/df, not X2/df # then, t1 <- fx_0/df.null - fx/df # t2 <- fx_0/df.null - 1/N (or N-1 for wishart) - + # note: in lavaan 0.5-21, we use the alternative formula: # TLI <- 1 - ((X2 - df)/(X2.null - df.null) * df.null/df) # - this one has the advantage that a 'robust' version @@ -451,28 +470,28 @@ } } - if("tli.scaled" %in% fit.measures || + if("tli.scaled" %in% fit.measures || "nnfi.scaled" %in% fit.measures) { t1 <- (X2.scaled - df.scaled)*df.null.scaled t2 <- (X2.null.scaled - df.null.scaled)*df.scaled if(is.na(t1) || is.na(t2)) { indices["tli.scaled"] <- indices["nnfi.scaled"] <- NA - } else if(df > 0 && t2 != 0) { + } else if(df > 0 && t2 != 0) { indices["tli.scaled"] <- indices["nnfi.scaled"] <- 1 - t1/t2 } else { indices["tli.scaled"] <- indices["nnfi.scaled"] <- 1 } } - if("tli.robust" %in% fit.measures || + if("tli.robust" %in% fit.measures || "nnfi.robust" %in% fit.measures) { if(TEST[[2]]$test %in% c("satorra.bentler", "yuan.bentler")) { # see Brosseau-Liard & Savalei MBR 2014, equation 16 # what to do if X2 = 0 and df = 0? in this case, - # the scaling factor (ch) will be NA, and we get NA + # the scaling factor (ch) will be NA, and we get NA # (instead of 1) if(X2 < .Machine$double.eps && df == 0) { ch <- 0 @@ -480,7 +499,7 @@ ch <- TEST[[2]]$scaling.factor } cb <- fit.indep@test[[2]]$scaling.factor - + t1 <- (X2 - ch*df)*df.null t2 <- (X2.null - cb*df.null)*df if(is.na(t1) || is.na(t2)) { @@ -494,9 +513,9 @@ indices["tli.robust"] <- indices["nnfi.robust"] <- NA } } - - - + + + # RFI - relative fit index (Bollen, 1986; Joreskog & Sorbom 1993) if("rfi" %in% fit.measures) { if(df > 0) { @@ -638,10 +657,10 @@ } else { # missing patterns case pat <- object@Data@Mp[[g]]$pat Ng <- object@Data@nobs[[g]] - ni <- as.numeric(apply(pat, 1, sum) %*% + ni <- as.numeric(apply(pat, 1, sum) %*% object@Data@Mp[[g]]$freq) fx.full <- object@SampleStats@missing.h1[[g]]$h1 - logl.H1.group[g] <- - (ni/2 * log(2 * pi)) - + logl.H1.group[g] <- - (ni/2 * log(2 * pi)) - (Ng/2 * fx.full) } } @@ -664,7 +683,7 @@ if(logl.ok) { for(g in 1:G) { Ng <- object@SampleStats@nobs[[g]] - logl.H0.group[g] <- -Ng * (fx.group[g] - + logl.H0.group[g] <- -Ng * (fx.group[g] - logl.H1.group[g]/Ng) } if(G > 1) { @@ -676,8 +695,8 @@ logl.H0 <- -1 * fx } else { logl.H0 <- as.numeric(NA) - } - + } + if("logl" %in% fit.measures) { indices["logl"] <- logl.H0 } @@ -699,13 +718,13 @@ BIC2 <- -2*logl.H0 + npar*log(N.star) indices["bic2"] <- BIC2 } - + # scaling factor for MLR if(object@Options$test == "yuan.bentler") { - indices["scaling.factor.h1"] <- + indices["scaling.factor.h1"] <- TEST[[2]]$scaling.factor.h1 - indices["scaling.factor.h0"] <- + indices["scaling.factor.h0"] <- TEST[[2]]$scaling.factor.h0 } } # ML @@ -732,7 +751,7 @@ if(any(c("rmsea","rmsea.scaled","rmsea.robust") %in% fit.measures)) { # RMSEA # - RMSEA.scaled replaces X2 by X2.scaled (which is not ok) - # - RMSEA.robust uses the formula from Broseau-Liard, Savalei & Li + # - RMSEA.robust uses the formula from Broseau-Liard, Savalei & Li # (2012) paper (see eq 8) if(is.na(X2) || is.na(df)) { RMSEA <- RMSEA.scaled <- RMSEA.robust <- as.numeric(NA) @@ -743,11 +762,11 @@ # scaling factor c.hat <- TEST[[2]]$scaling.factor - } + } if(object@Options$mimic %in% c("Mplus", "lavaan")) { RMSEA <- sqrt( max( c((X2/N)/df - 
1/N, 0) ) ) * sqrt(G) if(scaled && test != "scaled.shifted") { - RMSEA.scaled <- + RMSEA.scaled <- sqrt( max( c((X2/N)/d - 1/N, 0) ) ) * sqrt(G) RMSEA.robust <- sqrt( max( c((X2/N)/df - c.hat/N, 0) ) ) * sqrt(G) @@ -793,7 +812,7 @@ if(inherits(lambda.l, "try-error")) { lambda.l <- NA } if(object@Options$mimic %in% c("lavaan", "Mplus")) { GG <- 0 - indices["rmsea.ci.lower"] <- + indices["rmsea.ci.lower"] <- sqrt( lambda.l/((N-GG)*df) ) * sqrt(G) } else { indices["rmsea.ci.lower"] <- sqrt( lambda.l/(N*df) ) @@ -814,7 +833,7 @@ (pchisq(XX2, df=df2, ncp=lambda) - 0.95) } if(is.na(XX2) || is.na(df2)) { - indices["rmsea.ci.lower.scaled"] <- + indices["rmsea.ci.lower.scaled"] <- indices["rmsea.ci.lower.robust"] <- NA } else if(df < 1 || df2 < 1 || lower.lambda(0) < 0.0) { indices["rmsea.ci.lower.scaled"] <- @@ -825,7 +844,7 @@ silent=TRUE) if(inherits(lambda.l, "try-error")) { lambda.l <- NA } if(object@Options$mimic %in% c("lavaan", "Mplus")) { - indices["rmsea.ci.lower.scaled"] <- + indices["rmsea.ci.lower.scaled"] <- sqrt( lambda.l/(N*df2) ) * sqrt(G) } else { # no multiple group correction @@ -847,7 +866,7 @@ sqrt( (c.hat*lambda.l)/(N*df2) ) * sqrt(G) } else { # no multiple group correction - indices["rmsea.ci.lower.robust"] <- + indices["rmsea.ci.lower.robust"] <- sqrt( (c.hat*lambda.l)/(N*df2) ) } } else { @@ -870,7 +889,7 @@ if(inherits(lambda.u, "try-error")) { lambda.u <- NA } if(object@Options$mimic %in% c("lavaan", "Mplus")) { GG <- 0 - indices["rmsea.ci.upper"] <- + indices["rmsea.ci.upper"] <- sqrt( lambda.u/((N-GG)*df) ) * sqrt(G) } else { indices["rmsea.ci.upper"] <- sqrt( lambda.u/(N*df) ) @@ -891,11 +910,11 @@ (pchisq(XX2, df=df2, ncp=lambda) - 0.05) } if(is.na(XX2) || is.na(df2)) { - indices["rmsea.ci.upper.scaled"] <- + indices["rmsea.ci.upper.scaled"] <- indices["rmsea.ci.upper.robust"] <- NA - } else if(df < 1 || df2 < 1 || upper.lambda(N.RMSEA) > 0 || + } else if(df < 1 || df2 < 1 || upper.lambda(N.RMSEA) > 0 || upper.lambda(0) < 0) { - indices["rmsea.ci.upper.scaled"] <- + indices["rmsea.ci.upper.scaled"] <- indices["rmsea.ci.upper.robust"] <- 0 } else { # 'scaled' @@ -922,7 +941,7 @@ silent=TRUE) if(inherits(lambda.u, "try-error")) { lambda.u <- NA } if(object@Options$mimic %in% c("lavaan", "Mplus")) { - indices["rmsea.ci.upper.robust"] <- + indices["rmsea.ci.upper.robust"] <- sqrt( (c.hat*lambda.u)/(N*df2) ) * sqrt(G) } else { # no multiple group correction @@ -933,7 +952,7 @@ indices["rmsea.ci.upper.robust"] <- NA } } - } + } if("rmsea.pvalue" %in% fit.measures) { if(is.na(X2) || is.na(df)) { @@ -941,10 +960,10 @@ } else if(df > 0) { if(object@Options$mimic %in% c("lavaan","Mplus")) { ncp <- N*df*0.05^2/G - indices["rmsea.pvalue"] <- + indices["rmsea.pvalue"] <- 1 - pchisq(X2, df=df, ncp=ncp) } else { - indices["rmsea.pvalue"] <- + indices["rmsea.pvalue"] <- 1 - pchisq(X2, df=df, ncp=(N*df*0.05^2)) } } else { @@ -962,13 +981,13 @@ df2 <- sum(TEST[[2]]$trace.UGamma) } if(is.na(XX2) || is.na(df2)) { - indices["rmsea.pvalue.scaled"] <- + indices["rmsea.pvalue.scaled"] <- indices["rmsea.pvalue.robust"] <- as.numeric(NA) } else if(df > 0) { # scaled if(object@Options$mimic %in% c("lavaan", "Mplus")) { ncp <- N*df2*0.05^2/G - indices["rmsea.pvalue.scaled"] <- + indices["rmsea.pvalue.scaled"] <- 1 - pchisq(XX2, df=df2, ncp=ncp) } else { indices["rmsea.pvalue.scaled"] <- @@ -995,7 +1014,7 @@ indices["rmsea.pvalue.robust"] <- NA } } else { - indices["rmsea.pvalue.scaled"] <- + indices["rmsea.pvalue.scaled"] <- indices["rmsea.pvalue.robust"] <- NA # used to be 1 in < 0.5-21 } } @@ 
-1039,7 +1058,7 @@ # standardized residual covariance matrix # this is the Hu and Bentler definition, not the Bollen one! - # this one is used by EQS + # this one is used by EQS # and Mplus, but only if information=expected (god knows why) sqrt.d <- 1/sqrt(diag(S)) D <- diag(sqrt.d, ncol=length(sqrt.d)) @@ -1057,12 +1076,12 @@ R.cor.mean <- M/sqrt(diag(S)) - Mu.hat/sqrt(diag(Sigma.hat)) e <- nvar*(nvar+1)/2 + nvar - srmr_bentler.group[g] <- + srmr_bentler.group[g] <- sqrt( (sum(R[lower.tri(R, diag=TRUE)]^2) + sum(R.mean^2))/ e ) rmr.group[g] <- sqrt( (sum(RR[lower.tri(RR, diag=TRUE)]^2) + sum(RR.mean^2))/ e ) - srmr_bollen.group[g] <- + srmr_bollen.group[g] <- sqrt( (sum(R.cor[lower.tri(R.cor, diag=TRUE)]^2) + sum(R.cor.mean^2)) / e ) # see http://www.statmodel.com/download/SRMR.pdf @@ -1072,20 +1091,20 @@ sum(((diag(S) - diag(Sigma.hat))/diag(S))^2)) / e ) e <- nvar*(nvar+1)/2 - srmr_bentler_nomean.group[g] <- + srmr_bentler_nomean.group[g] <- sqrt( sum( R[lower.tri( R, diag=TRUE)]^2) / e ) - rmr_nomean.group[g] <- + rmr_nomean.group[g] <- sqrt( sum(RR[lower.tri(RR, diag=TRUE)]^2) / e ) - srmr_bollen_nomean.group[g] <- + srmr_bollen_nomean.group[g] <- sqrt( sum(R.cor[lower.tri(R.cor, diag=TRUE)]^2) / e ) srmr_mplus_nomean.group[g] <- sqrt( (sum(R.cor[lower.tri(R.cor, diag=FALSE)]^2) + sum(((diag(S) - diag(Sigma.hat))/diag(S))^2)) / e ) } else { e <- nvar*(nvar+1)/2 - srmr_bentler_nomean.group[g] <- srmr_bentler.group[g] <- + srmr_bentler_nomean.group[g] <- srmr_bentler.group[g] <- sqrt( sum(R[lower.tri(R, diag=TRUE)]^2) / e ) - rmr_nomean.group[g] <- rmr.group[g] <- + rmr_nomean.group[g] <- rmr.group[g] <- sqrt( sum(RR[lower.tri(RR, diag=TRUE)]^2) / e ) srmr_bollen_nomean.group[g] <- srmr_bollen.group[g] <- sqrt( sum(R.cor[lower.tri(R.cor, diag=TRUE)]^2) / e ) @@ -1094,7 +1113,7 @@ sum(((diag(S) - diag(Sigma.hat))/diag(S))^2)) / e ) } } - + if(G > 1) { ## FIXME: get the scaling right SRMR_BENTLER <- as.numeric( (unlist(object@SampleStats@nobs) %*% srmr_bentler.group) / object@SampleStats@ntotal ) @@ -1173,14 +1192,14 @@ WLS.obs <- object@SampleStats@WLS.obs WLS.V <- lav_model_wls_v(lavmodel = object@Model, lavsamplestats = object@SampleStats, - estimator = object@Options$estimator, + structured = TRUE, lavdata = object@Data) WLS.est <- lav_object_inspect_wls_est(object) for(g in 1:G) { wls.obs <- WLS.obs[[g]] wls.est <- WLS.est[[g]] wls.v <- WLS.V[[g]] - + if(is.null(wls.v)) { gfi.group[g] <- as.numeric(NA) } else { @@ -1213,11 +1232,11 @@ indices["agfi"] <- 1 } # LISREL formula (Simplis book 2002, p. 
126) - indices["pgfi"] <- (df/nel)*GFI + indices["pgfi"] <- (df/nel)*GFI } # MFI - McDonald Fit Index (McDonald, 1989) - if("mfi" %in% fit.measures) { + if("mfi" %in% fit.measures) { #MFI <- exp(-0.5 * (X2 - df)/(N-1)) # Hu & Bentler 1998 Table 1 MFI <- exp(-0.5 * (X2 - df)/N) indices["mfi"] <- MFI @@ -1279,7 +1298,7 @@ # print( fit.measures[ idx.missing ] ) # cat("\n") #} - + out <- unlist(indices[fit.measures]) if(length(out) > 0L) { @@ -1427,14 +1446,14 @@ t2.txt <- ifelse(scaled, sprintf(" %10.3f", x["nnfi.robust"]), "") cat(t0.txt, t1.txt, t2.txt, "\n", sep="") } - + if("nfi" %in% names.x) { t0.txt <- sprintf(" %-40s", "Bentler-Bonett Normed Fit Index (NFI)") t1.txt <- sprintf(" %10.3f", x["nfi"]) t2.txt <- ifelse(scaled, sprintf(" %10.3f", x["nfi.scaled"]), "") cat(t0.txt, t1.txt, t2.txt, "\n", sep="") } - + if("nfi" %in% names.x) { t0.txt <- sprintf(" %-40s", "Parsimony Normed Fit Index (PNFI)") t1.txt <- sprintf(" %10.3f", x["pnfi"]) diff -Nru r-cran-lavaan-0.5.22/R/lav_fit.R r-cran-lavaan-0.5.23.1097/R/lav_fit.R --- r-cran-lavaan-0.5.22/R/lav_fit.R 2016-09-19 14:37:28.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_fit.R 2017-01-25 17:52:45.000000000 +0000 @@ -4,7 +4,7 @@ VCOV = NULL, TEST = NULL) { - stopifnot(is.list(lavpartable), class(lavmodel) == "Model") + stopifnot(is.list(lavpartable), class(lavmodel) == "lavModel") # extract information from 'x' iterations = attr(x, "iterations") diff -Nru r-cran-lavaan-0.5.22/R/lav_fsr_croon.R r-cran-lavaan-0.5.23.1097/R/lav_fsr_croon.R --- r-cran-lavaan-0.5.22/R/lav_fsr_croon.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_fsr_croon.R 2017-02-17 07:29:04.000000000 +0000 @@ -0,0 +1,64 @@ +# use the `Croon' method to correct the covariance matrix +# of the factor scores +lav_fsr_croon_correction <- function(FS.COV, LVINFO, fs.method = "bartlett") { + + # ngroups + ngroups <- length(FS.COV) + + # FSR.COV + FSR.COV <- FS.COV + + for(g in 1:ngroups) { + + # number of factors - lv.names + nfac <- nrow(FS.COV[[g]]) + lv.names <- names(LVINFO[[g]]) + + # correct covariances only + if(fs.method != "bartlett") { + for(i in 1:(nfac-1)) { + LHS <- lv.names[i] + + A.y <- LVINFO[[g]][[LHS]]$fsm + lambda.y <- LVINFO[[g]][[LHS]]$lambda + + for(j in (i+1):nfac) { + RHS <- lv.names[j] + + A.x <- LVINFO[[g]][[RHS]]$fsm + lambda.x <- LVINFO[[g]][[RHS]]$lambda + + # always 1 if Bartlett + A.xy <- as.numeric(crossprod(A.x %*% lambda.x, + A.y %*% lambda.y)) + + # corrected covariance + FSR.COV[[g]][i,j] <- FSR.COV[[g]][j,i] <- + FS.COV[[g]][LHS,RHS] / A.xy + } + } + } + + # correct variances + for(i in 1:nfac) { + RHS <- lv.names[i] + + A.x <- LVINFO[[g]][[RHS]]$fsm + lambda.x <- LVINFO[[g]][[RHS]]$lambda + theta.x <- LVINFO[[g]][[RHS]]$theta + + if(fs.method == "bartlett") { + A.xx <- 1.0 + } else { + A.xx <- as.numeric(crossprod(A.x %*% lambda.x)) + } + + offset.x <- as.numeric(A.x %*% theta.x %*% t(A.x)) + + FSR.COV[[g]][i,i] <- (FS.COV[[g]][RHS, RHS] - offset.x)/A.xx + } + } # g + + FSR.COV +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_integrate.R r-cran-lavaan-0.5.23.1097/R/lav_integrate.R --- r-cran-lavaan-0.5.22/R/lav_integrate.R 2016-09-20 07:06:52.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_integrate.R 2016-12-30 16:43:46.000000000 +0000 @@ -23,7 +23,7 @@ -# return Gauss-Hermite quadrature rules for given order (n) +# return Gauss-Hermite quadrature rule for given order (n) # return list: x = nodes, w = quadrature weights # @@ -31,8 +31,8 @@ # the eigenvalues of the Jacobi matrix; weights are given by the squares of 
the # first components of the (normalized) eigenvectors, multiplied by sqrt(pi) # -# (This is NOT Golub & Welsch, 1968: as they used a specific method -# tailored for tridiagonal symmetric matrices) +# (This is NOT identical to Golub & Welsch, 1968, as they used a specific +# method tailored for tridiagonal symmetric matrices) # # TODO: look at https://github.com/ajt60gaibb/FastGaussQuadrature.jl/blob/master/src/gausshermite.jl # featuring the work of Ignace Bogaert (UGent) @@ -41,20 +41,7 @@ # by sum( f(x_i) * w_i ) # # CHECK: sum(w_i) should always be sqrt(pi) = 1.772454 -# -# Example: define g <- function(x) dnorm(x,0,0.2) * exp(-x*x) -# plot: -# x <- seq(-5,5,0.01) -# plot(x, g(x)) -# brute force solution: integrate(g, -100, 100) = 0.9622504 -# using gauss_hermite, define f <- function(x) dnorm(x,0,0.2) -# XW <- lavaan:::lav_integration_gauss_hermite(200) -# sum( f(XW$x) * XW$w ) = 0.9622504 -# -# note that we need N=200 to get decent accuracy here, because we have -# a rather spiky function - -lav_integration_gauss_hermite <- function(n = 21L, revert = FALSE) { +lav_integration_gauss_hermite_xw <- function(n = 21L, revert = FALSE) { # force n to be an integer n <- as.integer(n); stopifnot(n > 0L) @@ -86,25 +73,27 @@ list(x = x, w = w) } -# 'dnorm' version: weighting function is not exp(-x^2) but -# w(x) = 1/(sqrt(2*pi)) * exp(-0.5 * x^2) -# -# Example: define g <- function(x) dnorm(x,0,0.2) * dnorm(x, 0.3, 0.4) -# brute force solution: integrate(g, -100, 100) = 0.712326 -# using gauss_hermite_dnorm, define f <- function(x) dnorm(x,0,0.2) -# XW <- lav_integration_gauss_hermite_dnorm(n=100, mean=0.3, sd=0.4) -# sum( f(XW$x) * XW$w ) = 0.712326 -lav_integration_gauss_hermite_dnorm <- function(n = 21L, mean = 0, sd = 1, - ndim = 1L, - revert = FALSE, - prune = 0) { - XW <- lav_integration_gauss_hermite(n = n, revert = revert) +# generate GH points + weights +lav_integration_gauss_hermite <- function(n = 21L, + dnorm = FALSE, + mean = 0, sd = 1, + ndim = 1L, + revert = TRUE, + prune = FALSE) { + + XW <- lav_integration_gauss_hermite_xw(n = n, revert = revert) + + # dnorm kernel? + if(dnorm) { + # scale/shift x + x <- XW$x * sqrt(2) * sd + mean - # scale/shift x - x <- XW$x * sqrt(2) * sd + mean - - # scale w - w <- XW$w / sqrt(pi) + # scale w + w <- XW$w / sqrt(pi) + } else { + x <- XW$x + w <- XW$w + } if(ndim > 1L) { # cartesian product @@ -117,7 +106,13 @@ } # prune? - if(prune > 0) { + if(is.logical(prune) && prune) { + # always divide by N=21 + lower.limit <- XW$w[1] * XW$w[floor((n+1)/2)] / 21 + keep.idx <- which(w > lower.limit) + w <- w[keep.idx] + x <- x[keep.idx,, drop = FALSE] + } else if(is.numeric(prune) && prune > 0) { lower.limit <- quantile(w, probs = prune) keep.idx <- which(w > lower.limit) w <- w[keep.idx] @@ -127,6 +122,152 @@ list(x=x, w=w) } +# backwards compatibility +lav_integration_gauss_hermite_dnorm <- function(n = 21L, mean = 0, sd = 1, + ndim = 1L, + revert = TRUE, + prune = FALSE) { + lav_integration_gauss_hermite(n = n, dnorm = TRUE, mean = mean, sd = sd, + ndim = ndim, revert = revert, prune = prune) +} + # plot 2-dim # out <- lavaan:::lav_integration_gauss_hermite_dnorm(n = 20, ndim = 2) # plot(out$x, cex = -10/log(out$w), col = "darkgrey", pch=19) + +# integrand g(x) has the form g(x) = f(x) dnorm(x, m, s^2) +lav_integration_f_dnorm <- function(func = NULL, # often ly.prod + dnorm.mean = 0, # dnorm mean + dnorm.sd = 1, # dnorm sd + XW = NULL, # GH points + n = 21L, # number of nodes + adaptive = FALSE, # adaptive? + iterative = FALSE, # iterative?
+ max.iter = 20L, # max iterations + verbose = FALSE, # verbose? + ...) { # optional args for 'f' + + # create GH rule + if(is.null(XW)) { + XW <- lav_integration_gauss_hermite_xw(n = n, revert = TRUE) + } + + if(!adaptive) { + w.star <- XW$w / sqrt(pi) + x.star <- dnorm.sd*(sqrt(2)*XW$x) + dnorm.mean + out <- sum( func(x.star, ...) * w.star ) + } else { + # Naylor & Smith (1982, 1988) + if(iterative) { + mu.est <- 0; sd.est <- 1 + + for(i in 1:max.iter) { + w.star <- sqrt(2) * sd.est * dnorm(sqrt(2)*sd.est*XW$x + mu.est,dnorm.mean, dnorm.sd) * exp(XW$x^2) * XW$w + x.star <- sqrt(2)*sd.est*XW$x + mu.est + LIK <- sum( func(x.star, ...) * w.star ) + + # update mu + mu.est <- sum(x.star * (func(x.star, ...) * w.star)/LIK) + + # update sd + var.est <- sum(x.star^2 * (func(x.star, ...) * w.star)/LIK) - mu.est^2 + sd.est <- sqrt(var.est) + + if(verbose) { + cat("i = ", i, "LIK = ", LIK, "mu.est = ", mu.est, + "sd.est = ", sd.est, "\n") + } + } + out <- LIK + + # Liu and Pierce (1994) + } else { + # integrand g(x) = func(x) * dnorm(x; m, s^2) + log.g <- function(x, ...) { + ## FIXME: should we take the log right away? + log(func(x, ...) * dnorm(x, mean = dnorm.mean, sd = dnorm.sd)) + } + # find mu hat and sd hat + mu.est <- optimize(f = log.g, interval = c(-10,10), + maximum = TRUE, tol=.Machine$double.eps, ...)$maximum + H <- as.numeric(numDeriv::hessian(func = log.g, x = mu.est, ...)) + sd.est <- sqrt(1/-H) + + w.star <- sqrt(2) * sd.est * dnorm(sd.est*(sqrt(2)*XW$x) + mu.est,dnorm.mean,dnorm.sd) * exp(XW$x^2) * XW$w + x.star <- sd.est*(sqrt(2)*XW$x) + mu.est + + out <- sum( func(x.star, ...) * w.star ) + } + } + + out +} + +# integrand g(z) has the form g(z) = f(sz+m) dnorm(z, 0, 1) +lav_integration_f_dnorm_z <- function(func = NULL, # often ly.prod + f.mean = 0, # f mean + f.sd = 1, # f sd + XW = NULL, # GH points + n = 21L, # number of nodes + adaptive = FALSE, # adaptive? + iterative = FALSE, # iterative? + max.iter = 20L, # max iterations + verbose = FALSE, # verbose? + ...) { # optional args for 'f' + + # create GH rule + if(is.null(XW)) { + XW <- lav_integration_gauss_hermite_xw(n = n, revert = TRUE) + } + + if(!adaptive) { + w.star <- XW$w / sqrt(pi) + x.star <- sqrt(2)*XW$x + out <- sum( func(f.sd*x.star + f.mean, ...) * w.star ) + } else { + # Naylor & Smith (1982, 1988) + if(iterative) { + mu.est <- 0; sd.est <- 1 + + for(i in 1:max.iter) { + w.star <- sqrt(2) * sd.est * dnorm(sd.est*sqrt(2)*XW$x + mu.est,0, 1) * exp(XW$x^2) * XW$w + x.star <- sd.est*(sqrt(2)*XW$x) + mu.est + LIK <- sum( func(f.sd*x.star + f.mean, ...) * w.star ) + + # update mu + mu.est <- sum(x.star * (func(f.sd*x.star + f.mean, ...) * w.star)/LIK) + + # update sd + var.est <- sum(x.star^2 * (func(f.sd*x.star + f.mean, ...) * w.star)/LIK) - mu.est^2 + sd.est <- sqrt(var.est) + + if(verbose) { + cat("i = ", i, "LIK = ", LIK, "mu.est = ", mu.est, + "sd.est = ", sd.est, "\n") + } + } + out <- LIK + + # Liu and Pierce (1994) + } else { + # integrand g(x) = func(x) * dnorm(x; m, s^2) + log.gz <- function(x, ...) { + ## FIXME: should we take the log right away? + log(func(f.sd*x + f.mean, ...) 
* dnorm(x, mean = 0, sd = 1)) + } + # find mu hat and sd hat + mu.est <- optimize(f = log.gz, interval = c(-10,10), + maximum = TRUE, tol=.Machine$double.eps, ...)$maximum + H <- as.numeric(numDeriv::hessian(func = log.gz, x = mu.est, ...)) + sd.est <- sqrt(1/-H) + + w.star <- sqrt(2) * sd.est * dnorm(sd.est*(sqrt(2)*XW$x) + mu.est,0,1) * exp(XW$x^2) * XW$w + x.star <- sd.est*(sqrt(2)*XW$x) + mu.est + + out <- sum( func(f.sd*x.star + f.mean, ...) * w.star ) + } + } + + out +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_lavaanList_inspect.R r-cran-lavaan-0.5.23.1097/R/lav_lavaanList_inspect.R --- r-cran-lavaan-0.5.22/R/lav_lavaanList_inspect.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_lavaanList_inspect.R 2017-02-21 16:40:33.000000000 +0000 @@ -0,0 +1,287 @@ +# inspect a lavaanList object + +inspect.lavaanList <- function(object, what = "free", ...) { + lavListInspect(object = object, + what = what, + add.labels = TRUE, + add.class = TRUE, + drop.list.single.group = TRUE) +} + +# the `tech' version: no labels, full matrices, ... for further processing +lavTech.lavaanList <- function(object, + what = "free", + add.labels = FALSE, + add.class = FALSE, + list.by.group = FALSE, + drop.list.single.group = FALSE) { + + lavListInspect(object = object, what = what, + add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) +} + +lavListTech <- function(object, + what = "free", + add.labels = FALSE, + add.class = FALSE, + list.by.group = FALSE, + drop.list.single.group = FALSE) { + + lavListInspect(object = object, what = what, + add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) +} + +# just in case someone uses lavInspect on a lavaanList object +lavInspect.lavaanList <- function(object, + what = "free", + add.labels = TRUE, + add.class = TRUE, + list.by.group = TRUE, + drop.list.single.group = TRUE) { + + lavListInspect(object = object, what = what, + add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) +} + +lavListInspect <- function(object, + what = "free", + add.labels = TRUE, + add.class = TRUE, + list.by.group = TRUE, + drop.list.single.group = TRUE) { + + # object must inherit from class lavaanList + stopifnot(inherits(object, "lavaanList")) + + # only a single argument + if(length(what) > 1) { + stop("`what' argument contains multiple arguments; only one is allowed") + } + + # be case insensitive + what <- tolower(what) + + + #### model matrices, with different contents #### + if(what == "free") { + lav_lavaanList_inspect_modelmatrices(object, what = "free", + type = "free", add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) + } else if(what == "partable" || what == "user") { + lav_lavaanList_inspect_modelmatrices(object, what = "free", + type="partable", add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) + } else if(what == "start" || what == "starting.values") { + lav_lavaanList_inspect_modelmatrices(object, what = "start", + add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) + + + #### parameter table #### + } else if(what == "list") { + parTable(object) + + #### data + missingness #### + } else
if(what == "ngroups") { + object@Data@ngroups + } else if(what == "group") { + object@Data@group + } else if(what == "cluster") { + object@Data@cluster + } else if(what == "ordered") { + object@Data@ordered + } else if(what == "group.label") { + object@Data@group.label + } else if(what == "nobs") { + unlist( object@Data@nobs ) + } else if(what == "norig") { + unlist( object@Data@norig ) + } else if(what == "ntotal") { + sum(unlist( object@Data@nobs )) + + + #### meanstructure, categorical #### + } else if(what == "meanstructure") { + object@Model@meanstructure + } else if(what == "categorical") { + object@Model@categorical + } else if(what == "fixed.x") { + object@Model@fixed.x + } else if(what == "parameterization") { + object@Model@parameterization + + # options + } else if(what == "options" || what == "lavoptions") { + object@Options + + # call + } else if(what == "call") { + as.list( object@call ) + + #### not found #### + } else { + stop("unknown `what' argument in inspect function: `", what, "'") + } + +} + + +lav_lavaanList_inspect_start <- function(object) { + + # from 0.5-19, they are in the partable + if(!is.null(object@ParTable$start)) { + OUT <- object@ParTable$start + } else { + # in < 0.5-19, we should look in @Fit@start + OUT <- object@Fit@start + } + + OUT +} + +lav_lavaanList_inspect_modelmatrices <- function(object, what = "free", + type = "free", add.labels = FALSE, add.class = FALSE, + list.by.group = FALSE, + drop.list.single.group = FALSE) { + + GLIST <- object@Model@GLIST + + for(mm in 1:length(GLIST)) { + + if(add.labels) { + dimnames(GLIST[[mm]]) <- object@Model@dimNames[[mm]] + } + + if(what == "free") { + # fill in free parameter counts + if(type == "free") { + m.el.idx <- object@Model@m.free.idx[[mm]] + x.el.idx <- object@Model@x.free.idx[[mm]] + #} else if(type == "unco") { + # m.el.idx <- object@Model@m.unco.idx[[mm]] + # x.el.idx <- object@Model@x.unco.idx[[mm]] + } else if(type == "partable") { + m.el.idx <- object@Model@m.user.idx[[mm]] + x.el.idx <- object@Model@x.user.idx[[mm]] + } else { + stop("lavaan ERROR: unknown type argument:", type, ) + } + # erase everything + GLIST[[mm]][,] <- 0.0 + GLIST[[mm]][m.el.idx] <- x.el.idx + } else if(what == "start") { + # fill in starting values + m.user.idx <- object@Model@m.user.idx[[mm]] + x.user.idx <- object@Model@x.user.idx[[mm]] + START <- lav_lavaanList_inspect_start(object) + GLIST[[mm]][m.user.idx] <- START[x.user.idx] + } + + # class + if(add.class) { + if(object@Model@isSymmetric[mm]) { + class(GLIST[[mm]]) <- c("lavaan.matrix.symmetric", "matrix") + } else { + class(GLIST[[mm]]) <- c("lavaan.matrix", "matrix") + } + } + } + + # try to reflect `equality constraints' + con.flag <- FALSE + if(what == "free" && object@Model@eq.constraints) { + # extract constraints from parameter table + PT <- parTable(object) + CON <- PT[PT$op %in% c("==","<",">") ,c("lhs","op","rhs")] + rownames(CON) <- NULL + + # replace 'labels' by parameter numbers + ID <- lav_partable_constraints_label_id(PT) + LABEL <- names(ID) + for(con in 1:nrow(CON)) { + # lhs + LHS.labels <- all.vars(as.formula(paste("~",CON[con,"lhs"]))) + + if(length(LHS.labels) > 0L) { + # par id + LHS.freeid <- ID[match(LHS.labels, LABEL)] + + # substitute + tmp <- CON[con,"lhs"] + for(pat in 1:length(LHS.labels)) { + tmp <- sub(LHS.labels[pat], LHS.freeid[pat], tmp) + } + CON[con,"lhs"] <- tmp + } + + # rhs + RHS.labels <- all.vars(as.formula(paste("~",CON[con,"rhs"]))) + + if(length(RHS.labels) > 0L) { + # par id + RHS.freeid <- ID[match(RHS.labels, LABEL)] 
+ # substitute + tmp <- CON[con,"rhs"] + for(pat in 1:length(RHS.labels)) { + tmp <- sub(RHS.labels[pat], RHS.freeid[pat], tmp) + } + CON[con,"rhs"] <- tmp + } + } # con + + # add this info at the top + #GLIST <- c(constraints = list(CON), GLIST) + #no, not a good idea, it does not work with list.by.group + + # add it as a 'header' attribute? + attr(CON, "header") <- "Note: model contains equality constraints:" + con.flag <- TRUE + } + + # should we group them per group? + if(list.by.group) { + lavmodel <- object@Model + nmat <- lavmodel@nmat + + OUT <- vector("list", length = object@Data@ngroups) + for(g in 1:object@Data@ngroups) { + # which mm belong to group g? + mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] + mm.names <- names( GLIST[mm.in.group] ) + + OUT[[g]] <- GLIST[mm.in.group] + } + + if(object@Data@ngroups == 1L && drop.list.single.group) { + OUT <- OUT[[1]] + } else { + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) + } + } + } else { + OUT <- GLIST + } + + # header + if(con.flag) { + attr(OUT, "header") <- CON + } + + # lavaan.list + if(add.class) { + class(OUT) <- c("lavaan.list", "list") + } + + OUT +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_matrix.R r-cran-lavaan-0.5.23.1097/R/lav_matrix.R --- r-cran-lavaan-0.5.22/R/lav_matrix.R 2016-05-01 12:15:52.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_matrix.R 2017-01-20 15:04:59.000000000 +0000 @@ -1100,6 +1100,10 @@ # erase all col/rows... } else if(ndel == NCOL(S.inv)) { out <- matrix(0,0,0) + } else { + stop("lavaan ERROR: column indices exceed number of columns in S.inv") + } out diff -Nru r-cran-lavaan-0.5.22/R/lav_missing.R r-cran-lavaan-0.5.23.1097/R/lav_missing.R --- r-cran-lavaan-0.5.22/R/lav_missing.R 2016-03-26 14:14:55.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_missing.R 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ -## This file contains various routines that are used when -## the data are incomplete -## -## initial versions Y.R. -- july 2010 -## -## - added EM algorithm: Y.R.
aug 2011 - - -# mle using EM -estimate.moments.EM <- function (Y = NULL, Mp = NULL, Yp = NULL, - verbose = FALSE, max.iter = 500L, tol = 1e-05) { - - if(verbose) { - cat("\n") - cat("estimation saturated H1 model -- start EM steps\n") - } - - nvar <- ncol(Y); N <- nrow(Y) - if(length(Mp$empty.idx) > 0L) { - N <- N - length(Mp$empty.idx) - } - - # starting values as used by Mplus - mu0 <- apply(Y, 2, base::mean, na.rm = TRUE); names(mu0) <- NULL - var0 <- apply(Y, 2, stats::var, na.rm = TRUE); names(var0) <- NULL - sigma0 <- diag(x=var0, nrow=length(var0)) - mu <- mu0; sigma <- sigma0 - - # report - if(verbose) { - fx0 <- estimator.FIML(Sigma.hat=sigma, Mu.hat=mu, M=Yp) - cat(" EM iteration:", sprintf("%4d", 0), - " fx = ", sprintf("%15.10f", fx0), - "\n") - } - - # EM steps - for(i in 1:max.iter) { - T1 <- numeric(nvar) - T2 <- matrix(0, nvar, nvar) - for(p in seq_len(Mp$npatterns)) { - - nobs <- Yp[[p]]$freq - var.idx <- Mp$pat[p,] - - # extract raw data for these cases - X <- Y[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] - - if(all(var.idx)) { - # complete pattern - T1 <- T1 + colSums(X) - T2 <- T2 + crossprod(X) - next - } - - # partition Mu (1=missing, 2=complete) - Mu_1 <- mu[!var.idx] - Mu_2 <- mu[ var.idx] - - # partition Sigma (1=missing, 2=complete) - Sigma_11 <- sigma[!var.idx, !var.idx, drop=FALSE] - Sigma_12 <- sigma[!var.idx, var.idx, drop=FALSE] - Sigma_21 <- sigma[ var.idx, !var.idx, drop=FALSE] - Sigma_22 <- sigma[ var.idx, var.idx, drop=FALSE] - Sigma_22.inv <- try(inv.chol(Sigma_22, logdet=FALSE), silent = TRUE) - if(inherits(Sigma_22.inv, "try-error")) { - stop("lavaan ERROR: Sigma_22.inv cannot be inverted") - } - #Sigma_22.inv <- solve(Sigma_22) - - # estimate missing values in this pattern - Diff <- apply(X, 1, '-', Mu_2) - X_missing2 <- t(Sigma_12 %*% Sigma_22.inv %*% Diff) - X_missing <- t(apply(X_missing2, 1, '+', Mu_1)) - - # complete data for this pattern - X_complete <- matrix(0, nobs, nvar) - X_complete[, var.idx] <- X - X_complete[,!var.idx] <- X_missing - - # 1. SUM `completed' pattern - T1_p <- colSums(X_complete) - T1 <- T1 + T1_p - - # 2. CROSSPROD `completed' pattern - T2_p <- crossprod(X_complete) - - # correction for missing cells: conditional covariances - T2_p11 <- Sigma_11 - (Sigma_12 %*% Sigma_22.inv %*% Sigma_21) - T2_p[!var.idx, !var.idx] <- T2_p[!var.idx, !var.idx] + (T2_p11*nobs) - T2 <- T2 + T2_p - } - - # M-step -- Little & Rubin (2000) page 225: eq. 11.6 - # recompute mu and sigma - mu <- T1/N - sigma <- T2/N - tcrossprod(mu) - - # check if sigma is near-pd (+ poor fix) - ev <- eigen(sigma, symmetric = TRUE, only.values = TRUE) - tol <- 1e-6 # FIXME! 
- if(any(ev$values < tol)) { - #too.small <- which( ev$values < tol ) - #ev$values[too.small] <- tol - #ev$values <- ev$values + tol - #sigma <- ev$vectors %*% diag(ev$values) %*% t(ev$vectors) - - # ridge - diag(sigma) <- diag(sigma) + max(diag(sigma))*1e-08 - } - - # max absolute difference in parameter values - DELTA <- max(abs(c(mu, lav_matrix_vech(sigma)) - c(mu0, lav_matrix_vech(sigma0)))) - - # report fx - if(verbose) { - fx <- estimator.FIML(Sigma.hat=sigma, Mu.hat=mu, M=Yp) - cat(" EM iteration:", sprintf("%4d", i), - " fx = ", sprintf("%15.10f", fx), - " delta par = ", sprintf("%9.8f", DELTA), - "\n") - } - - # convergence check: using parameter values: - if(DELTA < tol) - break - - # again - mu0 <- mu; sigma0 <- sigma - } - - # compute fx if we haven't already - if(!verbose) - fx <- estimator.FIML(Sigma.hat=sigma, Mu.hat=mu, M=Yp) - - if(verbose) { - cat("estimated Sigma and Mu (H1):\n") - cat("\nSigma:\n"); print(sigma) - cat("\nMu:\n"); print(mu) - cat("\n") - cat("estimation saturated H1 model -- end\n\n") - } - - # fx <- estimator.FIML(Sigma.hat=sigma, Mu.hat=mu, M=Yp) - list(sigma = sigma, mu = mu, fx = fx) -} - diff -Nru r-cran-lavaan-0.5.22/R/lav_model_compute.R r-cran-lavaan-0.5.23.1097/R/lav_model_compute.R --- r-cran-lavaan-0.5.22/R/lav_model_compute.R 2016-01-06 13:27:31.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_compute.R 2017-02-21 08:23:48.000000000 +0000 @@ -6,13 +6,13 @@ nmat <- lavmodel@nmat nvar <- lavmodel@nvar - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks representation <- lavmodel@representation # return a list - Sigma.hat <- vector("list", length=ngroups) + Sigma.hat <- vector("list", length=nblocks) - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] @@ -46,7 +46,7 @@ attr(Sigma.hat[[g]], "log.det") <- Sigma.hat.log.det } } - } # ngroups + } # nblocks Sigma.hat } @@ -71,13 +71,13 @@ nmat <- lavmodel@nmat nvar <- lavmodel@nvar - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks representation <- lavmodel@representation # return a list - Sigma.hat <- vector("list", length=ngroups) + Sigma.hat <- vector("list", length=nblocks) - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] @@ -119,7 +119,7 @@ attr(Sigma.hat[[g]], "log.det") <- Sigma.hat.log.det } } - } # ngroups + } # nblocks Sigma.hat } @@ -130,14 +130,14 @@ if(is.null(GLIST)) GLIST <- lavmodel@GLIST nmat <- lavmodel@nmat - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks representation <- lavmodel@representation meanstructure <- lavmodel@meanstructure # return a list - Mu.hat <- vector("list", length=ngroups) + Mu.hat <- vector("list", length=nblocks) - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] @@ -150,7 +150,7 @@ } else { stop("only representation LISREL has been implemented for now") } - } # ngroups + } # nblocks Mu.hat } @@ -168,14 +168,14 @@ if(is.null(GLIST)) GLIST <- lavmodel@GLIST nmat <- lavmodel@nmat - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks representation <- lavmodel@representation meanstructure <- lavmodel@meanstructure # return a list - Mu.hat <- vector("list", length=ngroups) + Mu.hat <- vector("list", length=nblocks) - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? 
mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] @@ -194,7 +194,7 @@ } else { stop("only representation LISREL has been implemented for now") } - } # ngroups + } # nblocks Mu.hat } @@ -206,16 +206,16 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation th.idx <- lavmodel@th.idx # return a list - TH <- vector("list", length=ngroups) + TH <- vector("list", length=nblocks) # compute TH for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { if(length(th.idx[[g]]) == 0) { TH[[g]] <- numeric(0L) @@ -243,16 +243,16 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation conditional.x <- lavmodel@conditional.x # return a list - PI <- vector("list", length=ngroups) + PI <- vector("list", length=nblocks) # compute PI for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -279,16 +279,16 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation group.w.free <- lavmodel@group.w.free # return a list - GW <- vector("list", length=ngroups) + GW <- vector("list", length=nblocks) # compute GW for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -308,7 +308,7 @@ # transform to proportions #gw <- unlist(GW) #gw <- exp(gw) / sum(exp(gw)) - #for(g in 1:ngroups) { + #for(g in 1:nblocks) { # GW[[g]] <- gw[g] #} @@ -323,20 +323,27 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - VY <- vector("list", length=ngroups) + VY <- vector("list", length=nblocks) # compute VY for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] - cov.x <- lavsamplestats@cov.x[[g]] + if(!is.null(lavsamplestats)) { + cov.x <- lavsamplestats@cov.x[[g]] + } else { + if(lavmodel@fixed.x) { + stop("lavaan ERROR: fixed.x = TRUE, but cov.x is NULL") + } + cov.x <- NULL + } if(representation == "LISREL") { VY.g <- computeVY.LISREL(MLIST = MLIST, cov.x = cov.x) @@ -360,20 +367,27 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - ETA <- vector("list", length=ngroups) + ETA <- vector("list", length=nblocks) # compute ETA for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] - cov.x <- lavsamplestats@cov.x[[g]] + if(!is.null(lavsamplestats)) { + cov.x <- lavsamplestats@cov.x[[g]] + } else { + if(lavmodel@fixed.x) { + stop("lavaan ERROR: fixed.x = TRUE, but cov.x is NULL") + } + cov.x <- NULL + } if(representation == "LISREL") { ETA.g <- computeVETA.LISREL(MLIST = MLIST, cov.x = cov.x) @@ -402,15 +416,15 @@ # state or final?
if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - ETA <- vector("list", length=ngroups) + ETA <- vector("list", length=nblocks) # compute ETA for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -437,15 +451,15 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - COV <- vector("list", length=ngroups) + COV <- vector("list", length=nblocks) # compute COV for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -485,15 +499,15 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - EETA <- vector("list", length=ngroups) + EETA <- vector("list", length=nblocks) # compute E(ETA) for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -532,15 +546,15 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - EETAx <- vector("list", length=ngroups) + EETAx <- vector("list", length=nblocks) # compute E(ETA) for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -585,15 +599,15 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - LAMBDA <- vector("list", length=ngroups) + LAMBDA <- vector("list", length=nblocks) # compute LAMBDA for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -620,15 +634,15 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - THETA <- vector("list", length=ngroups) + THETA <- vector("list", length=nblocks) # compute THETA for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -656,15 +670,15 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list - EY <- vector("list", length=ngroups) + EY <- vector("list", length=nblocks) # compute E(Y) for each group - for(g in 1:ngroups) { + for(g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -696,12 +710,16 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST + # ngroups, not nblocks! 
+ ngroups <- lavsamplestats@ngroups + # return a list - YHAT <- vector("list", length=lavsamplestats@ngroups) + YHAT <- vector("list", length=ngroups) # compute YHAT for each group - for(g in seq_len(lavsamplestats@ngroups)) { + for(g in seq_len(ngroups)) { # which mm belong to group g? + # FIXME: what if more than g blocks??? mm.in.group <- 1:lavmodel@nmat[g] + cumsum(c(0L,lavmodel@nmat))[g] MLIST <- GLIST[ mm.in.group ] diff -Nru r-cran-lavaan-0.5.22/R/lav_model_estimate.R r-cran-lavaan-0.5.23.1097/R/lav_model_estimate.R --- r-cran-lavaan-0.5.22/R/lav_model_estimate.R 2016-04-30 15:13:40.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_estimate.R 2017-01-29 15:17:45.000000000 +0000 @@ -1,5 +1,4 @@ # model estimation - lav_model_estimate <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, @@ -53,7 +52,6 @@ lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, - estimator = estimator, verbose = verbose, forcePD = forcePD) @@ -103,7 +101,6 @@ lavcache = lavcache, type = "free", group.weight = group.weight, ### check me!! - estimator = estimator, verbose = verbose, forcePD = TRUE) @@ -192,7 +189,7 @@ attr(fx, "fx.group") <- rep(as.numeric(NA), ngroups) attr(x, "converged") <- FALSE attr(x, "iterations") <- 0L - attr(x, "control") <- lavmodel@control + attr(x, "control") <- lavoptions@control attr(x, "fx") <- fx return(x) } @@ -219,59 +216,44 @@ # first some nelder mead steps? (default = FALSE) - if(is.null(lavmodel@control$init_nelder_mead)) { - INIT_NELDER_MEAD <- FALSE - } else { - INIT_NELDER_MEAD <- lavmodel@control$init_nelder_mead - } + INIT_NELDER_MEAD <- lavoptions$optim.init_nelder_mead # gradient: analytic, numerical or NULL? - if(is.null(lavmodel@control$gradient)) { - GRADIENT <- first.derivative.param - } else { - if(is.logical(lavmodel@control$gradient)) { - if(lavmodel@control$gradient) { - GRADIENT <- first.derivative.param - } else { - GRADIENT <- NULL - } - } else if(is.character(lavmodel@control$gradient)) { - if(lavmodel@control$gradient %in% c("analytic","analytica")) { - GRADIENT <- first.derivative.param - } else if(lavmodel@control$gradient %in% c("numerical","numeric")) { - GRADIENT <- first.derivative.param.numerical - } else if(lavmodel@control$gradient == "NULL") { - GRADIENT <- NULL - } else { - warning("lavaan WARNING: lavmodel@control$gradient should be analytic, numerical or NULL") - GRADIENT <- NULL - } + if(is.character(lavoptions$optim.gradient)) { + if(lavoptions$optim.gradient %in% c("analytic","analytical")) { + GRADIENT <- first.derivative.param + } else if(lavoptions$optim.gradient %in% c("numerical", "numeric")) { + GRADIENT <- first.derivative.param.numerical + } else if(lavoptions$optim.gradient %in% c("NULL", "null")) { + GRADIENT <- NULL + } else { + warning("lavaan WARNING: gradient should be analytic, numerical or NULL") + } + } else if(is.logical(lavoptions$optim.gradient)) { + if(lavoptions$optim.gradient) { + GRADIENT <- first.derivative.param + } else { + GRADIENT <- NULL } + } else if(is.null(lavoptions$optim.gradient)) { + GRADIENT <- first.derivative.param } + # optimizer if(length(lavmodel@ceq.nonlinear.idx) == 0L && length(lavmodel@cin.linear.idx) == 0L && length(lavmodel@cin.nonlinear.idx) == 0L) { - if(is.null(lavmodel@control$optim.method)) { + if(is.null(lavoptions$optim.method)) { OPTIMIZER <- "NLMINB" #OPTIMIZER <- "BFGS" # slightly slower, no bounds; better scaling! #OPTIMIZER <- "L-BFGS-B" # trouble with Inf values for fx! 
} else { - OPTIMIZER <- toupper(lavmodel@control$optim.method) + OPTIMIZER <- toupper(lavoptions$optim.method) stopifnot(OPTIMIZER %in% c("NLMINB", "BFGS", "L-BFGS-B", "NONE")) } } else { - #cat("DEBUG: constrained optimization is currently broken!\n") - #cat("DEBUG: please reinstall lavaan 0.5-17 if you need this\n") - #cat("DEBUG: only *linear* *equality* constraints are supported in this version.\n") - #stop("not read yet - dev version") - if(is.null(lavmodel@control$optim.method)) { - OPTIMIZER <- "NLMINB.CONSTR" - } else { - OPTIMIZER <- toupper(lavmodel@control$optim.method) - stopifnot(OPTIMIZER %in% c("NLMINB.CONSTR", "NONE")) - } + OPTIMIZER <- "NLMINB.CONSTR" } if(INIT_NELDER_MEAD) { @@ -303,7 +285,7 @@ step.max=1.0, x.tol=1.5e-8, xf.tol=2.2e-14) - control.nlminb <- modifyList(control.nlminb, lavmodel@control) + control.nlminb <- modifyList(control.nlminb, lavoptions$control) control <- control.nlminb[c("eval.max", "iter.max", "trace", "step.min", "step.max", "abs.tol", "rel.tol", "x.tol", "xf.tol")] @@ -342,7 +324,7 @@ abstol=1e-20, reltol=1e-10, REPORT=1L) - control.bfgs <- modifyList(control.bfgs, lavmodel@control) + control.bfgs <- modifyList(control.bfgs, lavoptions$control) control <- control.bfgs[c("trace", "fnscale", "parscale", "ndeps", "maxit", "abstol", "reltol", "REPORT")] #trace <- 0L; if(verbose) trace <- 1L @@ -381,7 +363,7 @@ lmm=5L, factr=1e7, pgtol=0) - control.lbfgsb <- modifyList(control.lbfgsb, lavmodel@control) + control.lbfgsb <- modifyList(control.lbfgsb, lavoptions$control) control <- control.lbfgsb[c("trace", "fnscale", "parscale", "ndeps", "maxit", "REPORT", "lmm", "factr", "pgtol")] @@ -412,8 +394,8 @@ } else if(OPTIMIZER == "NLMINB.CONSTR") { ocontrol <- list(verbose=verbose) - if(!is.null(lavmodel@control$control.outer)) { - ocontrol <- c(lavmodel@control$control.outer, verbose=verbose) + if(!is.null(lavoptions$control$control.outer)) { + ocontrol <- c(lavoptions$control$control.outer, verbose=verbose) } control.nlminb <- list(eval.max=20000L, iter.max=10000L, @@ -421,7 +403,7 @@ #abs.tol=1e-20, abs.tol=(.Machine$double.eps * 10), rel.tol=1e-9) # 1e-10 seems 'too strict' - control.nlminb <- modifyList(control.nlminb, lavmodel@control) + control.nlminb <- modifyList(control.nlminb, lavoptions$control) control <- control.nlminb[c("eval.max", "iter.max", "trace", "abs.tol", "rel.tol")] cin <- cin.jac <- ceq <- ceq.jac <- NULL diff -Nru r-cran-lavaan-0.5.22/R/lav_model_gradient_mml.R r-cran-lavaan-0.5.23.1097/R/lav_model_gradient_mml.R --- r-cran-lavaan-0.5.22/R/lav_model_gradient_mml.R 2016-09-16 11:16:26.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_gradient_mml.R 2017-01-29 14:53:03.000000000 +0000 @@ -46,14 +46,14 @@ } # cholesky? 
- if(is.null(lavmodel@control$cholesky)) { + #if(is.null(lavmodel@control$cholesky)) { CHOLESKY <- TRUE - } else { - CHOLESKY <- as.logical(lavmodel@control$cholesky) + #} else { + # CHOLESKY <- as.logical(lavmodel@control$cholesky) #if(nfac > 1L && !CHOLESKY) { # warning("lavaan WARNING: CHOLESKY is OFF but nfac > 1L") #} - } + #} if(!CHOLESKY) { # we should still 'scale' the factors, if std.lv=FALSE @@ -61,8 +61,8 @@ } else { # cholesky takes care of scaling ETA.sd <- rep(1, nfac) - chol.VETA <- try(chol(VETAx), silent = TRUE) - if(inherits(chol.VETA, "try-error")) { + tchol.VETA <- try(chol(VETAx), silent = TRUE) + if(inherits(tchol.VETA, "try-error")) { warning("lavaan WARNING: --- VETAx not positive definite") print(VETAx) return(0) @@ -138,7 +138,7 @@ GLIST <- lav_model_x2GLIST(lavmodel, x=x, type="free") VETAx <- computeVETAx(lavmodel, GLIST = GLIST)[[g]] if(CHOLESKY) { - S <- chol(VETAx) + S <- chol(VETAx) ### FIXME or t(chol())???? } else { S <- diag( sqrt(diag(VETAx)) ) } @@ -161,7 +161,7 @@ # rescale/unwhiten if(CHOLESKY) { - eta <- eta %*% chol.VETA + eta <- eta %*% tchol.VETA } else { # no unit scale? (un-standardize) eta <- sweep(eta, MARGIN=2, STATS=ETA.sd, FUN="*") @@ -169,7 +169,7 @@ # eta_i = alpha + BETA eta_i + GAMMA eta_i + error # - # - direct effect of BETA is already in VETAx, and hence chol.VETA + # - direct effect of BETA is already in VETAx, and hence tchol.VETA # - need to add alpha, and GAMMA eta_i if(!is.null(MLIST$alpha) || !is.null(MLIST$gamma)) { eta <- sweep(EETAx, MARGIN=2, STATS=eta, FUN="+") diff -Nru r-cran-lavaan-0.5.22/R/lav_model_gradient.R r-cran-lavaan-0.5.23.1097/R/lav_model_gradient.R --- r-cran-lavaan-0.5.22/R/lav_model_gradient.R 2016-09-16 10:44:28.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_gradient.R 2017-02-21 08:49:27.000000000 +0000 @@ -6,7 +6,6 @@ lavdata = NULL, lavcache = NULL, type = "free", - estimator = "ML", verbose = FALSE, forcePD = TRUE, group.weight = TRUE, @@ -15,6 +14,7 @@ x.el.idx = NULL) { nmat <- lavmodel@nmat + estimator <- lavmodel@estimator representation <- lavmodel@representation meanstructure <- lavmodel@meanstructure categorical <- lavmodel@categorical @@ -31,6 +31,7 @@ if(estimator == "REML") warning("analytical gradient not implement; use numerical approximation") # group.weight + # FIXME --> block.weight if(group.weight) { if(estimator %in% c("ML","PML","FML","MML","REML","NTRLS")) { group.w <- (unlist(lavsamplestats@nobs)/lavsamplestats@ntotal) @@ -39,7 +40,7 @@ group.w <- ((unlist(lavsamplestats@nobs)-1)/lavsamplestats@ntotal) } } else { - group.w <- rep(1.0, lavsamplestats@ngroups) + group.w <- rep(1.0, lavmodel@nblocks) } # do we need WLS.est? @@ -79,7 +80,7 @@ if(lavmodel@nexo > 0L) { PI <- computePI(lavmodel = lavmodel) } else { - PI <- vector("list", length = lavsamplestats@ngroups) + PI <- vector("list", length = lavmodel@nblocks) } } if(group.w.free) { @@ -116,13 +117,13 @@ estimator=estimator, meanstructure=FALSE, conditional.x = conditional.x) - Omega.mu <- vector("list", length=lavsamplestats@ngroups) + Omega.mu <- vector("list", length = lavmodel@nblocks) } # compute DX (for all elements in every model matrix) DX <- vector("list", length=length(GLIST)) - for(g in 1:lavsamplestats@ngroups) { + for(g in 1:lavmodel@nblocks) { # which mm belong to group g? 
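# [editorial aside, not part of the patch] On the chol.VETA -> tchol.VETA
# rename (and the '### FIXME or t(chol())????' note) above: R's chol()
# returns the upper-triangular factor R with V = R'R, so post-multiplying
# row-wise whitened draws by chol(V) already induces covariance V; no extra
# transpose is needed for draws stored as rows. A standalone sketch with a
# toy covariance matrix:
#   set.seed(1)
#   VETAx <- matrix(c(1.0, 0.5,
#                     0.5, 1.0), 2, 2)
#   Z   <- matrix(rnorm(2e5), ncol = 2)  # whitened draws, identity covariance
#   eta <- Z %*% chol(VETAx)             # un-whiten: cov(eta) ~ VETAx
#   round(cov(eta), 2)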
mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] mm.names <- names( GLIST[mm.in.group] ) @@ -145,7 +146,7 @@ } # weight by group - if(lavsamplestats@ngroups > 1L) { + if(lavmodel@nblocks > 1L) { for(mm in mm.in.group) { DX[[mm]] <- group.w[g] * DX[[mm]] } @@ -155,7 +156,7 @@ # extract free parameters if(type == "free") { dx <- numeric( nx.free ) - for(g in 1:lavsamplestats@ngroups) { + for(g in 1:lavmodel@nblocks) { mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] for(mm in mm.in.group) { m.free.idx <- lavmodel@m.free.idx[[mm]] @@ -187,7 +188,7 @@ Delta <- computeDelta(lavmodel = lavmodel, GLIST. = GLIST) } - for(g in 1:lavsamplestats@ngroups) { + for(g in 1:lavmodel@nblocks) { #diff <- as.matrix(lavsamplestats@WLS.obs[[g]] - WLS.est[[g]]) #group.dx <- -1 * ( t(Delta[[g]]) %*% lavsamplestats@WLS.V[[g]] %*% diff) # 0.5-17: use crossprod twice; treat DWLS/ULS special @@ -275,7 +276,7 @@ Delta <- computeDelta(lavmodel = lavmodel, GLIST. = GLIST) } - for(g in 1:lavsamplestats@ngroups) { + for(g in 1:lavmodel@nblocks) { # augmented mean.x + cov.x matrix mean.x <- lavsamplestats@mean.x[[g]] @@ -361,7 +362,7 @@ Delta <- computeDelta(lavmodel = lavmodel, GLIST. = GLIST) } - for(g in 1:lavsamplestats@ngroups) { + for(g in 1:lavmodel@nblocks) { #print(GLIST) #print(lav_model_get_parameters(lavmodel = lavmodel, GLIST = GLIST)) @@ -493,7 +494,7 @@ conditional.x <- lavmodel@conditional.x group.w.free <- lavmodel@group.w.free nmat <- lavmodel@nmat - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks nvar <- lavmodel@nvar num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx @@ -516,8 +517,8 @@ type <- "free" # number of rows in DELTA.group - pstar <- integer(ngroups) - for(g in 1:ngroups) { + pstar <- integer(nblocks) + for(g in 1:nblocks) { pstar[g] <- as.integer(nvar[g] * (nvar[g] + 1) / 2) if(lavmodel@meanstructure) { pstar[g] <- nvar[g] + pstar[g] # first the means, then sigma @@ -573,8 +574,8 @@ # compute Delta - Delta <- vector("list", length=ngroups) - for(g in 1:ngroups) { + Delta <- vector("list", length=nblocks) + for(g in 1:nblocks) { Delta.group <- matrix(0, nrow=pstar[g], ncol=NCOL) # which mm belong to group g? 
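An aside on the indexing idiom repeated in all of these per-block loops: `1:nmat[g] + cumsum(c(0,nmat))[g]` picks, out of the flat GLIST, the model matrices that belong to block g. A self-contained illustration (hypothetical matrix names, two blocks of three matrices each; plain R, not part of the patch):

    nmat  <- c(3L, 3L)                     # three model matrices per block
    GLIST <- setNames(vector("list", sum(nmat)),
                      c("lambda", "theta", "psi", "lambda", "theta", "psi"))
    for(g in seq_along(nmat)) {
        mm.in.group <- 1:nmat[g] + cumsum(c(0L, nmat))[g]
        print(mm.in.group)                 # block 1: 1 2 3; block 2: 4 5 6
    }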
@@ -736,7 +737,7 @@ representation <- lavmodel@representation nmat <- lavmodel@nmat - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx @@ -764,8 +765,8 @@ #} # compute Delta per group - Delta <- vector("list", length=ngroups) - for(g in 1:ngroups) { + Delta <- vector("list", length=nblocks) + for(g in 1:nblocks) { mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] Delta.group <- NULL for(mm in mm.in.group) { @@ -837,10 +838,13 @@ lavsamplestats=NULL, estimator="ML", meanstructure=FALSE, conditional.x = FALSE) { - Omega <- vector("list", length=lavsamplestats@ngroups) - Omega.mu <- vector("list", length=lavsamplestats@ngroups) + # nblocks + nblocks <- length(Sigma.hat) - for(g in 1:lavsamplestats@ngroups) { + Omega <- vector("list", length = nblocks) + Omega.mu <- vector("list", length = nblocks) + + for(g in 1:nblocks) { # ML if(estimator == "ML" || estimator == "REML") { diff -Nru r-cran-lavaan-0.5.22/R/lav_model_hessian.R r-cran-lavaan-0.5.23.1097/R/lav_model_hessian.R --- r-cran-lavaan-0.5.22/R/lav_model_hessian.R 2015-03-06 15:31:51.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_hessian.R 2017-02-05 14:39:17.000000000 +0000 @@ -3,10 +3,12 @@ lav_model_hessian <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, - estimator = "ML", + lavoptions = NULL, lavcache = NULL, group.weight = TRUE) { + estimator <- lavmodel@estimator + # computing the Richardson extrapolation Hessian <- matrix(0, lavmodel@nx.free, lavmodel@nx.free) x <- lav_model_get_parameters(lavmodel = lavmodel) @@ -24,7 +26,6 @@ lavdata = lavdata, lavcache = lavcache, type = "free", - estimator = estimator, group.weight = group.weight) g.left2 <- lav_model_gradient(lavmodel = lavmodel, @@ -34,7 +35,6 @@ lavdata = lavdata, lavcache = lavcache, type = "free", - estimator = estimator, group.weight = group.weight) g.right <- @@ -45,7 +45,6 @@ lavdata = lavdata, lavcache = lavcache, type = "free", - estimator = estimator, group.weight = group.weight) g.right2 <- @@ -56,7 +55,6 @@ lavdata = lavdata, lavcache = lavcache, type = "free", - estimator = estimator, group.weight = group.weight) Hessian[,j] <- (g.left2 - 8*g.left + 8*g.right - g.right2)/(12*h.j) @@ -72,7 +70,6 @@ lav_model_hessian_complex <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, - estimator = "ML", lavcache = NULL, group.weight = TRUE) { @@ -84,7 +81,6 @@ lavdata = lavdata, lavcache = lavcache, type = "free", - estimator = estimator, group.weight = group.weight) dx } diff -Nru r-cran-lavaan-0.5.22/R/lav_model_implied.R r-cran-lavaan-0.5.23.1097/R/lav_model_implied.R --- r-cran-lavaan-0.5.22/R/lav_model_implied.R 2016-09-20 07:38:14.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_implied.R 2017-02-21 08:34:21.000000000 +0000 @@ -1,7 +1,7 @@ # compute model implied statistics lav_model_implied <- function(lavmodel = NULL) { - stopifnot(inherits(lavmodel, "Model")) + stopifnot(inherits(lavmodel, "lavModel")) # model-implied variance/covariance matrix ('sigma hat') Sigma.hat <- computeSigmaHat(lavmodel = lavmodel) @@ -13,14 +13,14 @@ if(lavmodel@conditional.x) { SLOPES <- computePI(lavmodel = lavmodel) } else { - SLOPES <- vector("list", length = lavmodel@ngroups) + SLOPES <- vector("list", length = lavmodel@nblocks) } # if categorical, model-implied thresholds if(lavmodel@categorical) { TH <- computeTH(lavmodel = lavmodel) } else { - TH <- vector("list", length = lavmodel@ngroups) + TH <- vector("list", length = lavmodel@nblocks) } 
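# [editorial aside, not part of the patch] In the lav_model_hessian.R hunk
# above, each Hessian column is a four-point central difference of the
# analytic gradient: H[,j] = (g(x-2h) - 8 g(x-h) + 8 g(x+h) - g(x+2h))/(12 h).
# A toy reproduction of the rule, assuming an analytic gradient `grad`:
#   f    <- function(x) sum(x^4)          # toy objective
#   grad <- function(x) 4 * x^3           # its analytic gradient
#   x <- c(1, 2); n <- length(x); H <- matrix(0, n, n)
#   h <- 1e-4 * pmax(abs(x), 1)
#   for(j in 1:n) {
#       e <- numeric(n); e[j] <- h[j]
#       H[, j] <- (grad(x - 2*e) - 8*grad(x - e) +
#                  8*grad(x + e) - grad(x + 2*e)) / (12 * h[j])
#   }
#   diag(H)                               # ~ 12 * x^2 = c(12, 48)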
if(lavmodel@group.w.free) { @@ -28,7 +28,7 @@ GW <- unname(lavmodel@GLIST[ w.idx ]) GW <- lapply(GW, as.numeric) } else { - GW <- vector("list", length = lavmodel@ngroups) + GW <- vector("list", length = lavmodel@nblocks) } # FIXME: should we use 'res.cov', 'res.int', 'res.th' if conditional.x?? diff -Nru r-cran-lavaan-0.5.22/R/lav_model_information.R r-cran-lavaan-0.5.23.1097/R/lav_model_information.R --- r-cran-lavaan-0.5.22/R/lav_model_information.R 2016-07-19 20:02:02.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_information.R 2017-02-06 08:54:58.000000000 +0000 @@ -8,15 +8,17 @@ lav_model_information <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, - estimator = "ML", Delta = NULL, lavcache = NULL, + lavoptions = NULL, information = "observed", extra = FALSE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { + estimator <- lavmodel@estimator + # compute information matrix if(information == "observed") { if(lavsamplestats@missing.flag) { @@ -26,13 +28,21 @@ } E <- lav_model_information_observed(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - lavcache = lavcache, estimator = estimator, - group.weight = group.weight, + lavcache = lavcache, group.weight = group.weight, + lavoptions = lavoptions, augmented = augmented, inverted = inverted, use.ginv = use.ginv) } else { + # structured or unstructured? (since 0.5-23) + if(!is.null(lavoptions) && + !is.null(lavoptions$h1.information) && + lavoptions$h1.information == "unstructured") { + structured <- FALSE + } else { + structured <- TRUE + } E <- lav_model_information_expected(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - lavcache = lavcache, estimator = estimator, extra = extra, + lavcache = lavcache, extra = extra, structured = structured, augmented = augmented, inverted = inverted, use.ginv = use.ginv) } @@ -41,10 +51,14 @@ } # fisher/expected information +# +# information = Delta' H Delta, where H is the unit information of +# the saturated model (evaluated either at the structured or unstructured +# estimates) lav_model_information_expected <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, - estimator = "ML", + structured = TRUE, Delta = NULL, lavcache = NULL, extra = FALSE, @@ -52,6 +66,8 @@ inverted = FALSE, use.ginv = FALSE) { + estimator <- lavmodel@estimator + if(inverted) { augmented <- TRUE } @@ -63,9 +79,9 @@ # compute/get WLS.V # if DWLS or ULS, this is the diagonal only! (since 0.5-17) - WLS.V <- lav_model_wls_v(lavmodel = lavmodel, + WLS.V <- lav_model_wls_v(lavmodel = lavmodel, lavsamplestats = lavsamplestats, - estimator = estimator, + structured = structured, lavdata = lavdata) # compute Information per group @@ -182,36 +198,121 @@ } lav_model_information_observed <- function(lavmodel = NULL, - lavsamplestats = NULL, - lavdata = NULL, - estimator = "ML", - lavcache = NULL, - group.weight = TRUE, - augmented = FALSE, - inverted = FALSE, - use.ginv = FALSE) { + lavsamplestats = NULL, + lavdata = NULL, + lavcache = NULL, + lavoptions = NULL, + group.weight = TRUE, + augmented = FALSE, + inverted = FALSE, + use.ginv = FALSE) { + estimator <- lavmodel@estimator if(inverted) { augmented <- TRUE } - Hessian <- lav_model_hessian(lavmodel = lavmodel, - lavsamplestats = lavsamplestats, - lavdata = lavdata, - lavcache = lavcache, - estimator = estimator, - group.weight = group.weight) - - # NOTE! What is the relationship between the Hessian of the objective - # function, and the `information' matrix (unit or total) - - # 1.
in lavaan, we ALWAYS minimize, so the Hessian is already pos def - # 2. currently, all estimators give unit information, except MML and PML - Information <- Hessian - - # divide by 'N' for MML and PML - if(estimator == "PML" || estimator == "MML") { - Information <- Information / lavsamplestats@ntotal + # observed.information: + # - "hessian": second derivative of objective function + # - "h1": observed information matrix of saturated (h1) model, + # pre- and post-multiplied by the jacobian of the model + # parameters (Delta), usually evaluated at the structured + # sample statistics + if(!is.null(lavoptions) && + !is.null(lavoptions$observed.information) && + lavoptions$observed.information == "h1") { + observed.information <- "h1" + } else { + observed.information <- "hessian" + } + + + if(observed.information == "hessian") { + Hessian <- lav_model_hessian(lavmodel = lavmodel, + lavsamplestats = lavsamplestats, + lavdata = lavdata, + lavoptions = lavoptions, + lavcache = lavcache, + group.weight = group.weight) + + # NOTE! What is the relationship between the Hessian of the objective + # function, and the `information' matrix (unit or total) + + # 1. in lavaan, we ALWAYS minimize, so the Hessian is already pos def + # 2. currently, all estimators give unit information, except MML and PML + Information <- Hessian + + # divide by 'N' for MML and PML + if(estimator == "PML" || estimator == "MML") { + Information <- Information / lavsamplestats@ntotal + } + + + # using 'observed h1 information' + } else { + + # compute DELTA + Delta <- computeDelta(lavmodel = lavmodel) + # compute observed information h1 + if(lavmodel@estimator == "GLS" || lavmodel@estimator == "WLS") { + WLS.V <- lavsamplestats@WLS.V + } else if(lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { + # diagonal only!! 
+ WLS.V <- lavsamplestats@WLS.VD + } else if(lavmodel@estimator == "ML") { + WLS.V <- vector("list", length=lavsamplestats@ngroups) + # four options: + # - complete data, structured (default) + # - complete data, unstructured + # - incomplete data, structured (default) + # - incomplete data, unstructured + if(lavoptions$h1.information == "structured") { + SIGMA <- computeSigmaHat(lavmodel = lavmodel) + MU <- computeMuHat(lavmodel = lavmodel) + } else { + SIGMA <- lavsamplestats@cov + MU <- lavsamplestats@mean + } + + # - if missing = two.stage, MU/SIGMA can be EM estimates + # if unstructured, or model-implied moments if structured + for(g in 1:lavsamplestats@ngroups) { + WLS.V[[g]] <- + lav_mvnorm_information_observed_samplestats( + sample.mean = lavsamplestats@mean[[g]], + sample.cov = lavsamplestats@cov[[g]], + Mu = MU[[g]], + Sigma = SIGMA[[g]]) + } + } else { + stop("lavaan ERROR: observed.information = ", + dQuote(observed.information), " not supported for estimator ", + dQuote(lavmodel@estimator) ) + } + + # compute Information per group + Info.group <- vector("list", length=lavsamplestats@ngroups) + for(g in 1:lavsamplestats@ngroups) { + fg <- lavsamplestats@nobs[[g]]/lavsamplestats@ntotal + # compute information for this group + if(estimator %in% c("DWLS", "ULS")) { + # diagonal weight matrix + Delta2 <- sqrt(WLS.V[[g]]) * Delta[[g]] + Info.group[[g]] <- fg * crossprod(Delta2) + } else { + # full weight matrix + Info.group[[g]] <- + fg * ( crossprod(Delta[[g]], WLS.V[[g]]) %*% Delta[[g]] ) + } + } + + # assemble over groups + Information <- Info.group[[1]] + if(lavsamplestats@ngroups > 1) { + for(g in 2:lavsamplestats@ngroups) { + Information <- Information + Info.group[[g]] + } + } } # augmented information? @@ -223,6 +324,12 @@ use.ginv = use.ginv) } + # for two.stage + observed.information = "h1" + if(observed.information != "hessian") { + attr(Information, "Delta") <- Delta + attr(Information, "WLS.V") <- WLS.V + } + Information } @@ -230,13 +337,14 @@ lav_model_information_firstorder <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, - estimator = "ML", lavcache = NULL, extra = FALSE, check.pd = FALSE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { + estimator <- lavmodel@estimator + if(inverted) { augmented <- TRUE } diff -Nru r-cran-lavaan-0.5.22/R/lav_model_lik.R r-cran-lavaan-0.5.23.1097/R/lav_model_lik.R --- r-cran-lavaan-0.5.22/R/lav_model_lik.R 2016-09-16 17:42:11.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_lik.R 2017-01-29 14:52:39.000000000 +0000 @@ -50,22 +50,22 @@ } # cholesky? - if(is.null(lavmodel@control$cholesky)) { + #if(is.null(lavmodel@control$cholesky)) { CHOLESKY <- TRUE - } else { - CHOLESKY <- as.logical(lavmodel@control$cholesky) + #} else { + # CHOLESKY <- as.logical(lavmodel@control$cholesky) #if(nfac > 1L && !CHOLESKY) { # warning("lavaan WARNING: CHOLESKY is OFF but nfac > 1L") #} - } + #} if(!CHOLESKY) { # we should still 'scale' the factors, if std.lv=FALSE ETA.sd <- sqrt( diag(VETAx) ) } else { # cholesky takes care of scaling - chol.VETA <- try(chol(VETAx), silent = TRUE) - if(inherits(chol.VETA, "try-error")) { + tchol.VETA <- try(chol(VETAx), silent = TRUE) + if(inherits(tchol.VETA, "try-error")) { warning("lavaan WARNING: --- VETAx not positive definite") print(VETAx) return(0) @@ -107,7 +107,7 @@ # rescale/unwhiten if(CHOLESKY) { # un-orthogonalize - XQ <- XQ %*% chol.VETA + XQ <- XQ %*% tchol.VETA } else { # no unit scale?
(un-standardize) XQ <- sweep(XQ, MARGIN=2, STATS=ETA.sd, FUN="*") @@ -122,7 +122,7 @@ # eta_i = alpha + BETA eta_i + GAMMA eta_i + error # - # - direct effect of BETA is already in VETAx, and hence chol.VETA + # - direct effect of BETA is already in VETAx, and hence tchol.VETA # - need to add alpha, and GAMMA eta_i if(!is.null(MLIST$alpha) || !is.null(MLIST$gamma)) { if(conditional.x) { diff -Nru r-cran-lavaan-0.5.22/R/lav_model_objective.R r-cran-lavaan-0.5.23.1097/R/lav_model_objective.R --- r-cran-lavaan-0.5.22/R/lav_model_objective.R 2016-09-16 10:43:06.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_objective.R 2017-02-24 11:20:53.000000000 +0000 @@ -5,7 +5,6 @@ lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, - estimator = "ML", verbose = FALSE, forcePD = TRUE, debug = FALSE) { @@ -14,13 +13,14 @@ if(is.null(GLIST)) GLIST <- lavmodel@GLIST # shortcut for data.type == "none" or estimator == "none" - if(estimator == "none" || length(lavsamplestats@cov) == 0L) { + if(lavmodel@estimator == "none" || length(lavsamplestats@cov) == 0L) { fx <- as.numeric(NA) attr(fx, "fx.group") <- rep(as.numeric(NA), lavsamplestats@ngroups) return(fx) } meanstructure <- lavmodel@meanstructure + estimator <- lavmodel@estimator categorical <- lavmodel@categorical group.w.free <- lavmodel@group.w.free fixed.x <- lavmodel@fixed.x @@ -94,6 +94,7 @@ fx <- 0.0 fx.group <- numeric( lavsamplestats@ngroups ) logl.group <- rep(as.numeric(NA), lavsamplestats@ngroups) + for(g in 1:lavsamplestats@ngroups) { # incomplete data and fiml? @@ -103,8 +104,8 @@ if(!attr(Sigma.hat[[g]], "po")) return(Inf) group.fx <- estimator.FIML(Sigma.hat=Sigma.hat[[g]], Mu.hat=Mu.hat[[g]], - M=lavsamplestats@missing[[g]], - h1=lavsamplestats@missing.h1[[g]]$h1) + Yp=lavsamplestats@missing[[g]], + h1=lavsamplestats@missing.h1[[g]]$h1, N=lavsamplestats@nobs[[g]]) } else { stop("this estimator: `", estimator, "' can not be used with incomplete data and the missing=\"ml\" option") @@ -112,7 +113,9 @@ } else if(estimator == "ML" || estimator == "Bayes") { # complete data # ML and friends - if(conditional.x) { + if(lavdata@nlevels > 1L) { + group.fx <- 0 + } else if(conditional.x) { group.fx <- estimator.ML_res( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], @@ -216,7 +219,9 @@ } if(estimator == "ML" || estimator == "REML" || estimator == "NTRLS") { - group.fx <- 0.5 * group.fx ## FIXME + if(lavdata@nlevels == 1L) { + group.fx <- 0.5 * group.fx ## FIXME + } } else if(estimator == "PML" || estimator == "FML" || estimator == "MML") { # do nothing @@ -289,3 +294,4 @@ fx } + diff -Nru r-cran-lavaan-0.5.22/R/lav_model.R r-cran-lavaan-0.5.23.1097/R/lav_model.R --- r-cran-lavaan-0.5.22/R/lav_model.R 2015-12-20 14:54:39.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model.R 2017-02-21 11:35:36.000000000 +0000 @@ -1,25 +1,21 @@ -# constructor of the matrix representation +# constructor of the matrix lavoptions$representation # # initial version: YR 22/11/2010 # - YR 14 Jan 2014: moved to lav_model.R # - YR 18 Nov 2014: more efficient handling of linear equality constraints # - YR 02 Dec 2014: allow for bare-minimum parameter tables +# - YR 25 Jan 2017: collect options in lavoptions -# construct MATRIX representation of the model +# construct MATRIX lavoptions$representation of the model lav_model <- function(lavpartable = NULL, - representation = "LISREL", - conditional.x = FALSE, - th.idx = list(), - parameterization = "delta", - link = "logit", - control = list(), - debug = FALSE) { + lavoptions = NULL, + th.idx = list()) { # 
handle bare-minimum partables lavpartable <- lav_partable_complete(lavpartable) # global info from user model - ngroups <- max(lavpartable$group) + nblocks <- lav_partable_nblocks(lavpartable) meanstructure <- any(lavpartable$op == "~1") categorical <- any(lavpartable$op == "|") if(categorical) meanstructure <- TRUE @@ -29,7 +25,7 @@ # handle variable definitions and (in)equality constraints CON <- lav_constraints_parse(partable = lavpartable, constraints = NULL, - debug = debug) + debug = lavoptions$debug) # handle *linear* equality constraints special if(CON$ceq.linear.only.flag) { @@ -43,12 +39,12 @@ } # select model matrices - if(representation == "LISREL") { + if(lavoptions$representation == "LISREL") { REP <- representation.LISREL(lavpartable, target=NULL, extra=TRUE) } else { stop("lavaan ERROR: only representation \"LISREL\" has been implemented.") } - if(debug) print(REP) + if(lavoptions$debug) print(REP) # FIXME: check for non-existing parameters bad.idx <- which(REP$mat == "" & @@ -73,33 +69,33 @@ m.free.idx <- m.user.idx <- vector(mode="list", length=nG) x.free.idx <- x.user.idx <- vector(mode="list", length=nG) - # prepare ngroups-sized slots - nvar <- integer(ngroups) + # prepare nblocks-sized slots + nvar <- integer(nblocks) nmat <- unlist(attr(REP, "mmNumber")) - num.idx <- vector("list", length=ngroups) - nexo <- integer(ngroups) - ov.x.dummy.ov.idx <- vector(mode="list", length=ngroups) - ov.x.dummy.lv.idx <- vector(mode="list", length=ngroups) - ov.y.dummy.ov.idx <- vector(mode="list", length=ngroups) - ov.y.dummy.lv.idx <- vector(mode="list", length=ngroups) + num.idx <- vector("list", length=nblocks) + nexo <- integer(nblocks) + ov.x.dummy.ov.idx <- vector(mode="list", length=nblocks) + ov.x.dummy.lv.idx <- vector(mode="list", length=nblocks) + ov.y.dummy.ov.idx <- vector(mode="list", length=nblocks) + ov.y.dummy.lv.idx <- vector(mode="list", length=nblocks) offset <- 0L - for(g in 1:ngroups) { + for(g in 1:nblocks) { - # observed and latent variables for this group - ov.names <- vnames(lavpartable, "ov", group=g) - ov.names.nox <- vnames(lavpartable, "ov.nox", group=g) - ov.names.x <- vnames(lavpartable, "ov.x", group=g) + # observed and latent variables for this block + ov.names <- lav_partable_vnames(lavpartable, "ov", block = g) + ov.names.nox <- lav_partable_vnames(lavpartable, "ov.nox", block = g) + ov.names.x <- lav_partable_vnames(lavpartable, "ov.x", block = g) nexo[g] <- length(ov.names.x) - ov.num <- vnames(lavpartable, "ov.num", group=g) - if(conditional.x) { + ov.num <- lav_partable_vnames(lavpartable, "ov.num", block = g) + if(lavoptions$conditional.x) { nvar[g] <- length(ov.names.nox) } else { nvar[g] <- length(ov.names) } num.idx[[g]] <- match(ov.num, ov.names.nox) - # model matrices for this group + # model matrices for this block mmNumber <- attr(REP, "mmNumber")[[g]] mmNames <- attr(REP, "mmNames")[[g]] mmSymmetric <- attr(REP, "mmSymmetric")[[g]] @@ -124,7 +120,7 @@ dimNames[[offset]] <- mmDimNames[[mm]] # select elements for this matrix - idx <- which(lavpartable$group == g & REP$mat == mmNames[mm]) + idx <- which(lavpartable$block == g & REP$mat == mmNames[mm]) # create empty `pattern' matrix # FIXME: one day, we may want to use sparse matrices... 
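This lav_model.R hunk is where 0.5-23 starts trading 'group' for the more general 'block' (a group-by-level combination, preparing for multilevel support). A rough sketch of what the block count means; this is a hypothetical stand-in, not lavaan's actual lav_partable_nblocks():

    partable <- data.frame(lhs = "f", op = "=~", rhs = "y",
                           group = c(1L, 1L, 2L, 2L),
                           level = c(1L, 2L, 1L, 2L))
    nblocks <- nrow(unique(partable[, c("group", "level")]))
    nblocks                                # 4 blocks: 2 groups x 2 levels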
@@ -177,7 +173,7 @@ } # representation specific stuff - if(representation == "LISREL" && mmNames[mm] == "lambda") { + if(lavoptions$representation == "LISREL" && mmNames[mm] == "lambda") { ov.dummy.names.nox <- attr(REP, "ov.dummy.names.nox")[[g]] ov.dummy.names.x <- attr(REP, "ov.dummy.names.x")[[g]] ov.dummy.names <- c(ov.dummy.names.nox, ov.dummy.names.x) @@ -198,7 +194,7 @@ } # representation specific - if(representation == "LISREL" && mmNames[mm] == "delta") { + if(lavoptions$representation == "LISREL" && mmNames[mm] == "delta") { # only categorical values are listed in the lavpartable # but all remaining values should be 1.0 idx <- which(tmp[,1L] == 0.0) @@ -211,23 +207,23 @@ } # g # fixed.x parameters? - fixed.x <- any(lavpartable$exo > 0L & lavpartable$free == 0L) - if(categorical) { - fixed.x <- TRUE - } + #fixed.x <- any(lavpartable$exo > 0L & lavpartable$free == 0L) + #if(categorical) { + # fixed.x <- TRUE + #} - Model <- new("Model", + Model <- new("lavModel", GLIST=GLIST, dimNames=dimNames, isSymmetric=isSymmetric, mmSize=mmSize, - representation=representation, + representation=lavoptions$representation, meanstructure=meanstructure, categorical=categorical, - link=link, - control=control, - ngroups=ngroups, + link=lavoptions$link, + nblocks=nblocks, + ngroups=nblocks, # for rsem!!! group.w.free=group.w.free, nmat=nmat, nvar=nvar, @@ -269,18 +265,20 @@ con.lambda = con.lambda, nexo = nexo, - fixed.x = fixed.x, - conditional.x = conditional.x, + fixed.x = lavoptions$fixed.x, + conditional.x = lavoptions$conditional.x, #x.idx = x.idx, - parameterization = parameterization, + parameterization = lavoptions$parameterization, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, - ov.y.dummy.lv.idx = ov.y.dummy.lv.idx) + ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, + + estimator = lavoptions$estimator) - if(debug) { - cat("lavaan DEBUG: lavaanModel\n") + if(lavoptions$debug) { + cat("lavaan lavoptions$debug: lavaanModel\n") print( str(Model) ) print( Model@GLIST ) } diff -Nru r-cran-lavaan-0.5.22/R/lav_model_utils.R r-cran-lavaan-0.5.23.1097/R/lav_model_utils.R --- r-cran-lavaan-0.5.22/R/lav_model_utils.R 2016-07-18 09:48:09.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_utils.R 2017-02-21 08:35:28.000000000 +0000 @@ -56,8 +56,7 @@ } # warning: this will make a copy of lavmodel -lav_model_set_parameters <- function(lavmodel = NULL, x = NULL, - estimator = "ML") { +lav_model_set_parameters <- function(lavmodel = NULL, x = NULL) { tmp <- lavmodel@GLIST for(mm in 1:length(lavmodel@GLIST)) { @@ -70,11 +69,11 @@ if(lavmodel@categorical) { nmat <- lavmodel@nmat if(lavmodel@representation == "LISREL") { - for(g in 1:lavmodel@ngroups) { + for(g in 1:lavmodel@nblocks) { # which mm belong to group g? 
mm.in.group <- 1:nmat[g] + cumsum(c(0L,nmat))[g] - if(estimator %in% c("WLS","DWLS","ULS","PML")) { + if(lavmodel@estimator %in% c("WLS","DWLS","ULS","PML")) { if(lavmodel@parameterization == "delta") { tmp[mm.in.group] <- setResidualElements.LISREL(MLIST = tmp[mm.in.group], @@ -86,7 +85,7 @@ setDeltaElements.LISREL(MLIST = tmp[mm.in.group], num.idx = lavmodel@num.idx[[g]]) } - } else if(estimator %in% c("MML", "FML")) { + } else if(lavmodel@estimator %in% c("MML", "FML")) { # ttt <- diag(tmp[mm.in.group]$theta) # diag(tmp[mm.in.group]$theta) <- as.numeric(NA) # if(length(lavmodel@num.idx[[g]]) > 0L) { @@ -148,7 +147,7 @@ if(lavmodel@categorical && setDelta && lavmodel@parameterization == "theta") { nmat <- lavmodel@nmat - for(g in 1:lavmodel@ngroups) { + for(g in 1:lavmodel@nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0L,nmat))[g] GLIST[mm.in.group] <- diff -Nru r-cran-lavaan-0.5.22/R/lav_model_vcov.R r-cran-lavaan-0.5.23.1097/R/lav_model_vcov.R --- r-cran-lavaan-0.5.22/R/lav_model_vcov.R 2016-07-18 15:52:37.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_vcov.R 2017-02-06 08:39:15.000000000 +0000 @@ -1,8 +1,7 @@ # bootstrap based NVCOV lav_model_nvcov_bootstrap <- function(lavmodel = NULL, lavsamplestats = NULL, lavoptions = NULL, lavdata = NULL, - lavcache = NULL, lavpartable = NULL, - control=list()) { + lavcache = NULL, lavpartable = NULL) { # number of bootstrap draws if(!is.null(lavoptions$bootstrap)) { @@ -26,10 +25,7 @@ type = boot.type, FUN = ifelse(boot.type == "bollen.stine", "coeftest", "coef"), - warn = -1L, - parallel = control$parallel, - ncpus = control$ncpus, - cl = control$cl) + warn = -1L) if(boot.type == "bollen.stine") { nc <- ncol(COEF) TEST <- COEF[,nc] @@ -51,11 +47,11 @@ # robust `sem' NVCOV (see Browne, 1984, bentler & dijkstra 1985) lav_model_nvcov_robust_sem <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, - estimator = "ML", mimic = "lavaan", + mimic = "lavaan", use.ginv = FALSE) { # compute inverse of the expected(!) information matrix - if(estimator == "ML" && mimic == "Mplus") { + if(lavmodel@estimator == "ML" && mimic == "Mplus") { # YR - 11 aug 2010 - what Mplus seems to do is (see Muthen apx 4 eq102) # - WLS.V is not based on Sigma.hat and Mu.hat (as it # should be?), but on lavsamplestats@cov and lavsamplestats@mean... 
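Both the robust 'sem' estimator in the hunks that follow and the new two.stage code further down assemble a sandwich A^{-1} B A^{-1}, with A = Delta' V Delta (the information) and B = Delta' V Gamma V Delta. A minimal matrix sketch with random stand-in inputs (plain R, not lavaan's API):

    set.seed(1)
    p <- 4; q <- 2
    Delta <- matrix(rnorm(p * q), p, q)          # jacobian of model parameters
    V     <- diag(p)                             # weight matrix (WLS.V)
    Gamma <- crossprod(matrix(rnorm(p * p), p))  # asymptotic cov of sample stats
    A.inv <- solve(crossprod(Delta, V %*% Delta))
    B     <- t(Delta) %*% V %*% Gamma %*% V %*% Delta
    NVarCov <- A.inv %*% B %*% A.inv             # 'N times' the sandwich vcov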
@@ -72,7 +68,6 @@ E.inv <- lav_model_information_expected(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, extra = TRUE, augmented = TRUE, inverted = TRUE, @@ -89,7 +84,8 @@ # Gamma Gamma <- lavsamplestats@NACOV - if(estimator == "ML" && mimic == "Mplus" && !lavsamplestats@NACOV.user) { + if(lavmodel@estimator == "ML" && + mimic == "Mplus" && !lavsamplestats@NACOV.user) { # 'fix' G11 part of Gamma (NOTE: this is NOT needed for SB test # statistic for(g in 1:lavsamplestats@ngroups) { @@ -112,7 +108,7 @@ # fg twice for WLS.V, 1/fg1 once for GaMMA # if fg==fg1, there would be only one fg, as in Satorra 1999 p.8 # t(Delta) * WLS.V %*% Gamma %*% WLS.V %*% Delta - if(estimator == "DWLS" || estimator == "ULS") { + if(lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { # diagonal weight matrix WD <- WLS.V[[g]] * Delta[[g]] } else { @@ -132,12 +128,11 @@ } lav_model_nvcov_robust_sandwich <- function(lavmodel = lavmodel, - lavsamplestats = NULL, - lavdata = NULL, - information = "observed", - lavcache = NULL, - estimator = "ML", - use.ginv = FALSE) { + lavsamplestats = NULL, + lavdata = NULL, + information = "observed", + lavcache = NULL, + use.ginv = FALSE) { # sandwich estimator: A.inv %*% B %*% t(A.inv) # where A.inv == E.inv @@ -147,7 +142,6 @@ E.inv <- lav_model_information(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, lavcache = lavcache, information = information, extra = FALSE, @@ -165,7 +159,6 @@ lav_model_information_firstorder(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, lavcache = lavcache, extra = TRUE, check.pd = FALSE, @@ -182,6 +175,147 @@ NVarCov } +# two stage +# - two.stage: Gamma = I_1^{-1} +# - robust.two.stage: Gamma = incomplete Gamma (I_1^{-1} J_1 I_1^{-1}) +# where I_1 and J_1 are based on the (saturated) model h1 +# (either unstructured, or structured) +# +# references: +# +# - Savalei \& Bentler (2009) eq (6) for se = "two.stage" +# - Savalei \& Falk (2014) eq (3) for se = "robust.two.stage" +# - Yuan \& Bentler (2000) +lav_model_nvcov_two_stage <- function(lavmodel = NULL, + lavsamplestats = NULL, + lavoptions = NULL, + lavdata = NULL, + lavimplied = NULL, + use.ginv = FALSE) { + + + # expected OR observed, depending on lavoptions$information + if(is.null(lavoptions) && is.null(lavoptions$information)) { + lavoptions <- list(information = "observed") + } + + + + # information matrix + if(lavoptions$information == "expected") { + # structured or unstructured?
+ if(!is.null(lavoptions) && + !is.null(lavoptions$h1.information) && + lavoptions$h1.information == "unstructured") { + structured <- FALSE + } else { + structured <- TRUE + } + + E.inv <- lav_model_information_expected(lavmodel = lavmodel, + lavsamplestats = lavsamplestats, + structured = structured, + lavdata = NULL, + extra = TRUE, + augmented = TRUE, + inverted = TRUE, + use.ginv = use.ginv) + Delta <- attr(E.inv, "Delta") + WLS.V <- attr(E.inv, "WLS.V") # this is 'H' in the literature + attr(E.inv, "Delta") <- NULL + attr(E.inv, "WLS.V") <- NULL + } else { + E.inv <- lav_model_information_observed(lavmodel = lavmodel, + lavsamplestats = lavsamplestats, + lavoptions = lavoptions, + lavdata = NULL, + augmented = TRUE, + inverted = TRUE, + use.ginv = use.ginv) + if(lavoptions$observed.information == "h1") { + Delta <- attr(E.inv, "Delta") + WLS.V <- attr(E.inv, "WLS.V") # this is 'H' in the literature + attr(E.inv, "Delta") <- NULL + attr(E.inv, "WLS.V") <- NULL + } else { + stop("lavaan ERROR: two.stage + observed information currently only works with observed.information = ", dQuote("h1")) + } + } + + # check if E.inv is ok + if(inherits(E.inv, "try-error")) { + return(E.inv) + } + + if(is.null(WLS.V)) { + stop("lavaan ERROR: WLS.V/H is NULL, observed.information = hessian?") + } + Gamma <- vector("list", length = lavsamplestats@ngroups) + + # handle multiple groups + tDVGVD <- matrix(0, ncol=ncol(E.inv), nrow=nrow(E.inv)) + for(g in 1:lavsamplestats@ngroups) { + fg <- lavsamplestats@nobs[[g]] /lavsamplestats@ntotal + #fg1 <- (lavsamplestats@nobs[[g]]-1)/lavsamplestats@ntotal + fg1 <- fg + # fg twice for WLS.V, 1/fg1 once for GaMMA + # if fg==fg1, there would be only one fg, as in Satorra 1999 p.8 + # t(Delta) * WLS.V %*% Gamma %*% WLS.V %*% Delta + WD <- WLS.V[[g]] %*% Delta[[g]] + + # to compute (incomplete) GAMMA, should we use + # structured or unstructured mean/sigma? 
+ # + # we use the same setting as to compute 'H' (the h1 information matrix) + # so that Omega = H if data is complete + if(lavoptions$h1.information == "unstructured") { + MU <- lavsamplestats@missing.h1[[g]]$mu + SIGMA <- lavsamplestats@missing.h1[[g]]$sigma + } else { + MU <- lavimplied$mean[[g]] + SIGMA <- lavimplied$cov[[g]] + } + + # compute 'Gamma' (or Omega.beta) + if(lavoptions$se == "two.stage") { + # this is Savalei & Bentler (2009) + if(lavoptions$information == "expected") { + Info <- lav_mvnorm_missing_information_expected( + Y = lavdata@X[[g]], Mp = lavdata@Mp[[g]], + Mu = MU, Sigma = SIGMA) + } else { + Info <- lav_mvnorm_missing_information_observed_samplestats( + Yp = lavsamplestats@missing[[g]], + Mu = MU, Sigma = SIGMA) + } + Gamma[[g]] <- lav_matrix_symmetric_inverse(Info) + } else { # we assume "robust.two.stage" + # NACOV is here incomplete Gamma + # Savalei & Falk (2014) + # + Gamma[[g]] <- lav_mvnorm_missing_h1_omega_sw(Y = + lavdata@X[[g]], Mp = lavdata@Mp[[g]], + Yp = lavsamplestats@missing[[g]], + Mu = MU, Sigma = SIGMA, + information = lavoptions$information) + } + + # compute + tDVGVD <- tDVGVD + fg*fg/fg1 * crossprod(WD, Gamma[[g]] %*% WD) + } # g + + NVarCov <- (E.inv %*% tDVGVD %*% E.inv) + + # to be reused by lavaanTest + attr(NVarCov, "Delta") <- Delta + attr(NVarCov, "Gamma") <- Gamma + #if(lavoptions$h1.information.se == lavoptions$h1.information.test) { + attr(NVarCov, "E.inv") <- E.inv + attr(NVarCov, "WLS.V") <- WLS.V + #} + + NVarCov +} lav_model_vcov <- function(lavmodel = NULL, lavsamplestats = NULL, @@ -189,10 +323,9 @@ lavdata = NULL, lavpartable = NULL, lavcache = NULL, - use.ginv = FALSE, - control=list()) { + lavimplied = NULL, + use.ginv = FALSE) { - estimator <- lavoptions$estimator likelihood <- lavoptions$likelihood information <- lavoptions$information se <- lavoptions$se @@ -212,7 +345,6 @@ NVarCov <- lav_model_information(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, lavcache = lavcache, information = information, extra = FALSE, @@ -225,7 +357,6 @@ lav_model_information_firstorder(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, lavcache = lavcache, extra = TRUE, check.pd = FALSE, @@ -237,7 +368,6 @@ NVarCov <- lav_model_nvcov_robust_sem(lavmodel = lavmodel, lavsamplestats = lavsamplestats, - estimator = estimator, mimic = mimic, lavcache = lavcache, lavdata = lavdata, @@ -250,17 +380,24 @@ lavdata = lavdata, information = information, lavcache = lavcache, - estimator = estimator, use.ginv = use.ginv) + } else if(se %in% c("two.stage", "robust.two.stage")) { + NVarCov <- + lav_model_nvcov_two_stage(lavmodel = lavmodel, + lavsamplestats = lavsamplestats, + lavoptions = lavoptions, + lavdata = lavdata, + lavimplied = lavimplied, + use.ginv = use.ginv) + } else if(se == "bootstrap") { NVarCov <- try( lav_model_nvcov_bootstrap(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavcache = lavcache, - lavpartable = lavpartable, - control = control), + lavpartable = lavpartable), silent=TRUE ) } else { warning("lavaan WARNING: unknown se type: ", se) @@ -269,13 +406,14 @@ if(! inherits(NVarCov, "try-error") ) { # denominator!
- if(estimator %in% c("ML","PML","FML") && likelihood == "normal") { + if(lavmodel@estimator %in% c("ML","PML","FML") && + likelihood == "normal") { N <- lavsamplestats@ntotal } else { N <- lavsamplestats@ntotal - lavsamplestats@ngroups } - #if(estimator %in% c("PML", "MML")) { + #if(lavmodel@estimator %in% c("PML", "MML")) { # VarCov <- NVarCov #} else { VarCov <- 1/N * NVarCov diff -Nru r-cran-lavaan-0.5.22/R/lav_model_wls.R r-cran-lavaan-0.5.23.1097/R/lav_model_wls.R --- r-cran-lavaan-0.5.22/R/lav_model_wls.R 2016-03-27 18:36:28.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_model_wls.R 2017-02-21 08:38:00.000000000 +0000 @@ -5,7 +5,7 @@ # state or final? if(is.null(GLIST)) GLIST <- lavmodel@GLIST - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks meanstructure <- lavmodel@meanstructure categorical <- lavmodel@categorical group.w.free <- lavmodel@group.w.free @@ -27,8 +27,8 @@ GW <- computeGW(lavmodel = lavmodel, GLIST = GLIST) } - WLS.est <- vector("list", length=ngroups) - for(g in 1:ngroups) { + WLS.est <- vector("list", length=nblocks) + for(g in 1:nblocks) { # PI? if(lavmodel@conditional.x && lavmodel@nexo > 0L) { @@ -72,27 +72,45 @@ } # compute WLS.V (as a list per group) +# +# three options: +# 1) *LS: WLS.V is already in lavsamplestats +# 2) NTRLS: WLS.V needs to be recomputed after every iteration, using +# the structured estimates of Sigma/Mu +# 3) ML: 3a: complete, structured (default) +# 3b: complete, unstructured +# 3c: incomplete, FIML, structured +# 3d: incomplete, FIML, unstructured +# 3e: incomplete, two.stage, structured +# 3f: incomplete, two.stage, unstructured (EM estimates) lav_model_wls_v <- function(lavmodel = NULL, lavsamplestats = NULL, - estimator = "ML", + structured = TRUE, + lavimplied = NULL, lavdata = NULL) { WLS.V <- vector("list", length=lavsamplestats@ngroups) - # if we are using *LS, we already have WLS.V - if(estimator == "GLS" || estimator == "WLS") { + # 1) *LS: WLS.V is already in lavsamplestats + if(lavmodel@estimator == "GLS" || lavmodel@estimator == "WLS") { # for GLS, the WLS.V22 part is: 0.5 * t(D) %*% [S.inv %x% S.inv] %*% D # for WLS, the WLS.V22 part is: Gamma WLS.V <- lavsamplestats@WLS.V - } else if(estimator == "NTRLS") { + } else if(lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { + # diagonal only!!
- WLS.V <- lavsamplestats@WLS.VD - - # for ML, we need to recompute this, as it is function of Sigma (and Mu) - } else if(estimator == "ML") { + # 3) ML: 3a: complete, structured (default) + # 3b: complete, unstructured + # 3c: incomplete, FIML, structured + # 3d: incomplete, FIML, unstructured + # 3e: incomplete, two.stage, structured + # 3f: incomplete, two.stage, unstructured (EM estimates) + } else if(lavmodel@estimator == "ML") { WLS.V <- vector("list", length=lavsamplestats@ngroups) - if(lavmodel@conditional.x) { - Sigma.hat <- computeSigmaHatJoint(lavmodel = lavmodel, - lavsamplestats = lavsamplestats, extra = TRUE) - } else { - Sigma.hat <- computeSigmaHat(lavmodel = lavmodel, extra = TRUE) - } - if(lavmodel@group.w.free) { - GW <- unlist(computeGW(lavmodel = lavmodel)) - } - if(lavsamplestats@missing.flag || lavmodel@conditional.x) { + if(structured) { if(lavmodel@conditional.x) { - Mu.hat <- computeMuHatJoint(lavmodel = lavmodel, - lavsamplestats = lavsamplestats) + Sigma.hat <- computeSigmaHatJoint(lavmodel = lavmodel, + lavsamplestats = lavsamplestats) + } else { + Sigma.hat <- computeSigmaHat(lavmodel = lavmodel) + } + if(lavmodel@meanstructure) { + if(lavmodel@conditional.x) { + Mu.hat <- computeMuHatJoint(lavmodel = lavmodel, + lavsamplestats = lavsamplestats) + } else { + Mu.hat <- computeMuHat(lavmodel = lavmodel) + } } else { - Mu.hat <- computeMuHat(lavmodel = lavmodel) + Mu.hat <- NULL } } else { - Mu.hat <- NULL + if(lavmodel@conditional.x) { + # FIXME: what to do here? + stop("lavaan ERROR: conditional.x = TRUE, but structured = FALSE?") + } else { + # complete data: observed var/cov matrix 'S' + # two.stage + incomplete data: EM estimate of 'S' + Sigma.hat <- lavsamplestats@cov + } + if(lavmodel@meanstructure) { + if(lavmodel@conditional.x) { + # FIXME! + } else { + # complete data: observed mean vector 'ybar' + # two.stage + incomplete data: EM estimate of 'ybar' + Mu.hat <- lavsamplestats@mean + } + } else { + Mu.hat <- NULL + } + + } + + # GW?
+ if(lavmodel@group.w.free) { + GW <- unlist(computeGW(lavmodel = lavmodel)) } + + + # three options + # - complete data + # - incomplete data + FIML + # - incomplete data + two.stage + # two variants: + # - using unstructured moments + # - using structured moments for(g in 1:lavsamplestats@ngroups) { if(lavsamplestats@missing.flag) { stopifnot(!lavmodel@conditional.x) @@ -138,17 +191,7 @@ Mu = Mu.hat[[g]], Sigma = Sigma.hat[[g]]) } else { - # WLS.V22 = 0.5*t(D) %*% [Sigma.hat.inv %x% Sigma.hat.inv]%*% D - - # NOTE: when fixed.x=TRUE, this will give slightly different - # results for the SEs compared to <= 0.5-20 (and Mplus), - # because we set the rows/columns of exo variables to zero - # - # but this should be ok (although SEs for ~1 are somewhat - # larger) - WLS.V[[g]] <- lav_samplestats_Gamma_inverse_NT( - ICOV = attr(Sigma.hat[[g]],"inv")[,,drop=FALSE], COV = Sigma.hat[[g]][,,drop=FALSE], MEAN = Mu.hat[[g]], x.idx = lavsamplestats@x.idx[[g]], diff -Nru r-cran-lavaan-0.5.22/R/lav_modification.R r-cran-lavaan-0.5.23.1097/R/lav_modification.R --- r-cran-lavaan-0.5.22/R/lav_modification.R 2016-08-24 09:50:22.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_modification.R 2017-02-21 11:09:47.000000000 +0000 @@ -29,7 +29,9 @@ } # sanity check - if(power) standardized <- TRUE + if(power) { + standardized <- TRUE + } # extended list (fixed-to-zero parameters) strict.exo <- FALSE @@ -228,7 +230,13 @@ # remove even more columns LIST$user <- NULL - if(max(LIST$group) == 1) LIST$group <- NULL + + # remove block/group/level if only a single block + if(lav_partable_nblocks(LIST) == 1L) { + LIST$block <- NULL + LIST$group <- NULL + LIST$level <- NULL + } # sort? if(sort.) { diff -Nru r-cran-lavaan-0.5.22/R/lav_muthen1984.R r-cran-lavaan-0.5.23.1097/R/lav_muthen1984.R --- r-cran-lavaan-0.5.22/R/lav_muthen1984.R 2016-08-29 14:33:24.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_muthen1984.R 2017-02-07 14:04:23.000000000 +0000 @@ -12,15 +12,21 @@ # YR 26 Nov 2015: move step1 + step2 to external functions # -muthen1984 <- function(Data, ov.names=NULL, ov.types=NULL, ov.levels=NULL, - ov.names.x=character(0L), eXo=NULL, verbose=FALSE, - missing="listwise", - WLS.W=TRUE, # do we need asymptotic variance of stats?
- optim.method = "nlminb", - zero.add = c(0.5, 0.0), +muthen1984 <- function(Data = NULL, + ov.names = NULL, + ov.types = NULL, + ov.levels = NULL, + ov.names.x = character(0L), + eXo = NULL, + verbose = FALSE, + missing = "listwise", + WLS.W = TRUE, + optim.method = "nlminb", + zero.add = c(0.5, 0.0), zero.keep.margins = TRUE, - zero.cell.warn = TRUE, - group=1L) { # group only for error messages + zero.cell.warn = FALSE, + zero.cell.tables = TRUE, + group = 1L) { # group only for error messages # just in case Data is a vector Data <- as.matrix(Data) @@ -102,7 +108,11 @@ zero.add = zero.add, zero.keep.margins = zero.keep.margins, zero.cell.warn = zero.cell.warn, + zero.cell.tables = zero.cell.tables, optim.method = optim.method) + empty.cell.tables <- attr(COR, "zero.cell.tables") + attr(COR, "zero.cell.tables") <- NULL + if(verbose) { colnames(COR) <- rownames(COR) <- ov.names print(COR) @@ -117,7 +127,7 @@ out <- list(TH=TH, SLOPES=SLOPES, VAR=VAR, COR=COR, COV=COV, SC=NULL, TH.NOX=TH.NOX,TH.NAMES=TH.NAMES, TH.IDX=TH.IDX, INNER=NULL, A11=NULL, A12=NULL, A21=NULL, A22=NULL, - WLS.W=NULL, H=NULL) + WLS.W=NULL, H=NULL, zero.cell.tables=matrix("",0,2)) return(out) } @@ -303,26 +313,52 @@ ################ ################ - # A22 + # A22 (diagonal) A22 <- matrix(0, pstar, pstar) for(i in seq_len(pstar)) { A22[i,i] <- sum( SC.COR[,i]*SC.COR[,i], na.rm=TRUE ) } - # A12 + # A12 (zero) A12 <- matrix(0, NROW(A11), NCOL(A22)) - B <- rbind( cbind(A11,A12), - cbind(A21,A22) ) + #B <- rbind( cbind(A11,A12), + # cbind(A21,A22) ) + + # we invert B as a block-triangular matrix (0.5-23) + # + # B.inv = A11^{-1} 0 + # -A22^{-1} A21 A11^{-1} A22^{-1} + # - # invert! - ## FIXME: we need to invert B as a partioned matrix - B.inv <- try(solve(B), silent = TRUE) - if(inherits(B.inv, "try-error")) { + # invert A + A11.inv <- try(solve(A11), silent = TRUE) + if(inherits(A11.inv, "try-error")) { # brute force - B.inv <- MASS::ginv(B) - warning("lavaan WARNING: trouble inverting W matrix; used generalized inverse") + A11.inv <- MASS::ginv(A11) + warning("lavaan WARNING: trouble constructing W matrix; used generalized inverse for A11 submatrix") } + + # invert + da22 <- diag(A22) + if(any(da22 == 0)) { + warning("lavaan WARNING: trouble constructing W matrix; used generalized inverse for A22 submatrix") + A22.inv <- MASS::ginv(A22) + } else { + A22.inv <- A22 + diag(A22.inv) <- 1/da22 + } + + # lower-left block + A21.inv <- -A22.inv %*% A21 %*% A11.inv + + # upper-left block remains zero + A12.inv <- A12 + + # construct B.inv + B.inv <- rbind( cbind(A11.inv, A12.inv), + cbind(A21.inv, A22.inv) ) + # weight matrix (correlation metric) WLS.W <- B.inv %*% INNER %*% t(B.inv) @@ -361,7 +397,8 @@ out <- list(TH=TH, SLOPES=SLOPES, VAR=VAR, COR=COR, COV=COV, SC=SC, TH.NOX=TH.NOX,TH.NAMES=TH.NAMES, TH.IDX=TH.IDX, INNER=INNER, A11=A11, A12=A12, A21=A21, A22=A22, - WLS.W=WLS.W, H=H) + WLS.W=WLS.W, H=H, + zero.cell.tables = empty.cell.tables) out } diff -Nru r-cran-lavaan-0.5.22/R/lav_mvnorm_h1.R r-cran-lavaan-0.5.23.1097/R/lav_mvnorm_h1.R --- r-cran-lavaan-0.5.22/R/lav_mvnorm_h1.R 2016-06-26 18:50:39.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_mvnorm_h1.R 2017-01-19 18:30:20.000000000 +0000 @@ -4,12 +4,22 @@ # 1) loglikelihood h1 (from raw data, or sample statistics) # 4) hessian h1 around MLEs # 5) information h1 (restricted Sigma/mu) -# 5a: (unit) expected information h1 (A1) -# 5b: (unit) observed information h1 (A1) +# 5a: (unit) expected information h1 (A1 = Gamma.NT^{-1}) +# 5b: (unit) observed information h1 
(A1 = Gamma.NT^{-1}) # 5c: (unit) first.order information h1 (B1 = A1 %*% Gamma %*% A1) +# 6) inverted information h1 mu + vech(Sigma) +# 6a: (unit) inverted expected information (A1.inv = Gamma.NT) +# 6b: (unit) inverted observed information (A1.inv = Gamma.NT) +# 6c: (unit) inverted first-order information (B1.inv) +# 7) ACOV h1 mu + vech(Sigma) +# 7a: 1/N * Gamma.NT +# 7b: 1/N * Gamma.NT +# 7c: 1/N * (Gamma.NT * Gamma^{-1} * Gamma.NT) +# 7d: 1/N * Gamma (sandwich) -# YR 25 March 2016: first version +# YR 25 Mar 2016: first version +# YR 19 Jan 2017: added 6) + 7) # 1. likelihood h1 @@ -207,4 +217,91 @@ A1 %*% Gamma %*% A1 } +# 6) inverted information h1 mu + vech(Sigma) + +# 6a: (unit) inverted expected information (A1.inv = Gamma.NT) +# 6b: (unit) inverted observed information (A1.inv = Gamma.NT) + +lav_mvnorm_h1_inverted_information_expected <- +lav_mvnorm_h1_inverted_information_observed <- function(Y = NULL, + sample.cov = NULL) { + # sample.cov + if(is.null(sample.cov)) { + sample.mean <- colMeans(Y); N <- NROW(Y) + sample.cov <- 1/N*crossprod(Y) - tcrossprod(sample.mean) + } + + I11 <- sample.cov + I22 <- 2 * lav_matrix_duplication_ginv_pre_post(sample.cov %x% sample.cov) + + Gamma.NT <- lav_matrix_bdiag(I11, I22) + + Gamma.NT +} + +# 6c: (unit) inverted first-order information (B1.inv) + +lav_mvnorm_h1_inverted_information_firstorder <- function(Y = NULL, + sample.cov = NULL, + Sinv.method = "eigen", + sample.cov.inv = NULL, + Gamma = NULL) { + # Gamma + if(is.null(Gamma)) { + Gamma <- lav_samplestats_Gamma(Y, meanstructure = TRUE) + } + + # Gamma.NT + Gamma.NT <- + lav_mvnorm_h1_inverted_information_expected(Y = Y, + sample.cov = sample.cov) + + Gamma.NT %*% solve(Gamma, Gamma.NT) +} + + +# 7) ACOV h1 mu + vech(Sigma) + +# 7a: 1/N * Gamma.NT +# 7b: 1/N * Gamma.NT +lav_mvnorm_h1_acov_expected <- +lav_mvnorm_h1_acov_observed <- function(Y = NULL, + sample.cov = NULL) { + N <- NROW(Y) + + Gamma.NT <- + lav_mvnorm_h1_inverted_information_expected(Y = Y, + sample.cov = sample.cov) + + (1/N) * Gamma.NT +} + +# 7c: 1/N * (Gamma.NT * Gamma^{-1} * Gamma.NT) +lav_mvnorm_h1_acov_firstorder <- function(Y = NULL, + sample.cov = NULL, + Sinv.method = "eigen", + sample.cov.inv = NULL, + Gamma = NULL) { + N <- NROW(Y) + + J1.inv <- lav_mvnorm_h1_inverted_information_firstorder(Y = Y, + sample.cov = sample.cov, Sinv.method = Sinv.method, + sample.cov.inv = sample.cov.inv, Gamma = Gamma) + + (1/N) * J1.inv +} + +# 7d: 1/N * Gamma (sandwich) +lav_mvnorm_h1_acov_sandwich <- function(Y = NULL, + sample.cov = NULL, + Gamma = NULL) { + N <- NROW(Y) + + # Gamma + if(is.null(Gamma)) { + Gamma <- lav_samplestats_Gamma(Y, meanstructure = TRUE) + } + + (1/N) * Gamma +} diff -Nru r-cran-lavaan-0.5.22/R/lav_mvnorm_missing_h1.R r-cran-lavaan-0.5.23.1097/R/lav_mvnorm_missing_h1.R --- r-cran-lavaan-0.5.22/R/lav_mvnorm_missing_h1.R 2016-06-21 12:07:01.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_mvnorm_missing_h1.R 2017-02-05 10:23:44.000000000 +0000 @@ -1,16 +1,177 @@ -# the multivariate normal distribution, unrestricted (h1), missing values +# the Multivariate normal distribution, unrestricted (h1), missing values # 1) loglikelihood --> same as h0 but where Mu and Sigma are unrestricted # 2) 3) 4) 5) --> (idem) -# YR 26 March 2016: first version +# YR 26 Mar 2016: first version +# YR 20 Jan 2017: added _h1_omega_sw() # here, we estimate Mu and Sigma from Y with missing values, assuming normality -# this is a rewrite of the 'estimate.moments.EM' function in <= 0.5-20 
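# [editorial aside, not part of the patch] The inverted expected information
# returned by the new h1 helpers above is Gamma.NT = bdiag(S, 2 D+ (S %x% S) D+'),
# with D+ the generalized inverse of the duplication matrix. A p = 2 sketch
# that spells the duplication machinery out by hand (hypothetical mini
# versions of the lav_matrix_* helpers):
#   D <- matrix(0, 4, 3)                  # vec(S) = D %*% vech(S)
#   D[1, 1] <- 1; D[2, 2] <- 1; D[3, 2] <- 1; D[4, 3] <- 1
#   D.plus <- solve(crossprod(D)) %*% t(D)
#   S <- matrix(c(2, 1,
#                 1, 3), 2, 2)
#   I22 <- 2 * D.plus %*% (S %x% S) %*% t(D.plus)
#   Gamma.NT <- rbind(cbind(S,               matrix(0, 2, 3)),
#                     cbind(matrix(0, 3, 2), I22))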
-lav_mvnorm_missing_h1_estimate_moments <- function(Y = NULL, - Mp = NULL, - verbose = FALSE, - max.iter = 500L, - tol = 1e-05) { +# this is a rewrite of the 'estimate.moments.EM' function in <= 0.5-22 +lav_mvnorm_missing_h1_estimate_moments <- function(Y = NULL, + Mp = NULL, + Yp = NULL, + Sinv.method = "eigen", + verbose = FALSE, + max.iter = 500L, + tol = 1e-05) { + # check input + Y <- as.matrix(Y); P <- NCOL(Y); N <- NROW(Y) + + # missing patterns + if(is.null(Mp)) { + Mp <- lav_data_missing_patterns(Y) + } + if(is.null(Yp)) { + Yp <- lav_samplestats_missing_patterns(Y = Y, Mp = Mp) + } + + # remove empty cases + if(length(Mp$empty.idx) > 0L) { + N <- N - length(Mp$empty.idx) + } + + # verbose? + if(verbose) { + cat("\n") + cat("lav_mvnorm_missing_h1_estimate_moments: start EM steps\n") + } + + # starting values; zero covariances to guarantee a pd matrix + Mu0 <- base::.colMeans(Y, m = N, n = P, na.rm = TRUE) + var0 <- base::.colMeans(Y*Y, m = N, n = P, na.rm = TRUE) - Mu0*Mu0 + Sigma0 <- diag(x = var0, nrow = P) + Mu <- Mu0; Sigma <- Sigma0 + + # report + if(verbose) { + #fx0 <- estimator.FIML(Sigma.hat=Sigma, Mu.hat=Mu, M=Yp) + fx0 <- lav_mvnorm_missing_loglik_samplestats(Yp = Yp, + Mu = Mu, Sigma = Sigma, + log2pi = FALSE, + minus.two = TRUE)/N + cat(" EM iteration:", sprintf("%4d", 0), + " fx = ", sprintf("%15.10f", fx0), + "\n") + } + + # EM steps + for(i in 1:max.iter) { + + # E-step + Estep <- lav_mvnorm_missing_estep(Y = Y, Mp = Mp, + Mu = Mu, Sigma = Sigma, + Sinv.method = Sinv.method) + T1 <- Estep$T1 + T2 <- Estep$T2 + + # M-step + Mu <- T1/N + Sigma <- T2/N - tcrossprod(Mu) + + # check if Sigma is near-pd (+ poor fix) + ev <- eigen(Sigma, symmetric = TRUE, only.values = TRUE) + tol <- 1e-6 # FIXME! + if(any(ev$values < tol)) { + #too.small <- which( ev$values < tol ) + #ev$values[too.small] <- tol + #ev$values <- ev$values + tol + #Sigma <- ev$vectors %*% diag(ev$values) %*% t(ev$vectors) + + # ridge + diag(Sigma) <- diag(Sigma) + max(diag(Sigma))*1e-08 + } + + # max absolute difference in parameter values + DELTA <- max(abs(c(Mu, lav_matrix_vech(Sigma)) - + c(Mu0, lav_matrix_vech(Sigma0)))) + + # report fx + if(verbose) { + #fx <- estimator.FIML(Sigma.hat=Sigma, Mu.hat=Mu, M=Yp) + fx <- lav_mvnorm_missing_loglik_samplestats(Yp = Yp, + Mu = Mu, Sigma = Sigma, + log2pi = FALSE, + minus.two = TRUE)/N + cat(" EM iteration:", sprintf("%4d", i), + " fx = ", sprintf("%15.10f", fx), + " delta par = ", sprintf("%9.8f", DELTA), + "\n") + } + + # convergence check: using parameter values: + if(DELTA < tol) + break + + # again + Mu0 <- Mu; Sigma0 <- Sigma + + } # EM iterations + + if(verbose) { + cat("\nSigma:\n"); print(Sigma) + cat("\nMu:\n"); print(Mu) + cat("\n") + } + + # compute fx if we haven't already + if(!verbose) { + #fx <- estimator.FIML(Sigma.hat = Sigma, Mu.hat = Mu, M = Yp) + fx <- lav_mvnorm_missing_loglik_samplestats(Yp = Yp, + Mu = Mu, Sigma = Sigma, + log2pi = FALSE, + minus.two = TRUE)/N + } + + list(Sigma = Sigma, Mu = Mu, fx = fx) +} + +# compute N times ACOV(Mu, vech(Sigma)) +# in the literature: - `Omega_{SW}' +# - `Gamma for incomplete data' +# - (N times the) sandwich estimator for acov(mu,vech(Sigma)) +lav_mvnorm_missing_h1_omega_sw <- function(Y = NULL, + Mp = NULL, + Yp = NULL, + Sinv.method = "eigen", + Mu = NULL, + Sigma = NULL, + Sigma.inv = NULL, + information = "observed") { + + # missing patterns + if(is.null(Mp)) { + Mp <- lav_data_missing_patterns(Y) + } + + # sample stats per pattern + if(is.null(Yp) && (information == "observed" || 
is.null(Sigma))) { + Yp <- lav_samplestats_missing_patterns(Y = Y, Mp = Mp) + } + + # Sigma and Mu + if(is.null(Sigma) || is.null(Mu)) { + out <- lav_mvnorm_missing_h1_estimate_moments(Y = Y, Mp = Mp, + Yp = Yp) + Mu <- out$Mu + Sigma <- out$Sigma + } + + # information matrices + info <- lav_mvnorm_missing_information_both(Y = Y, Mp = Mp, Mu = Mu, + Sigma = Sigma, Sinv.method = Sinv.method, + Sigma.inv = Sigma.inv, information = information) + + A <- info$Abeta + A.inv <- lav_matrix_symmetric_inverse(S = A, logdet = FALSE, + Sinv.method = Sinv.method) + B <- info$Bbeta + + # sandwich + SW <- A.inv %*% B %*% A.inv + + SW } + + diff -Nru r-cran-lavaan-0.5.22/R/lav_mvnorm_missing.R r-cran-lavaan-0.5.23.1097/R/lav_mvnorm_missing.R --- r-cran-lavaan-0.5.22/R/lav_mvnorm_missing.R 2016-03-26 18:43:27.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_mvnorm_missing.R 2017-01-20 15:15:25.000000000 +0000 @@ -6,9 +6,28 @@ # 3) casewise scores with respect to mu, vech(Sigma), mu + vech(Sigma) # 4) hessian of mu + vech(Sigma) # 5) (unit) information of mu + vech(Sigma) +# 5a: (unit) expected information +# 5b: (unit) observed information +# 5c: (unit) first.order information +# 5d: lav_mvnorm_missing_information_both (both observed + first.order) + +# 6) inverted information h0 mu + vech(Sigma) +# 6a: / +# 6b: / +# 6c: / +# 7) ACOV h0 mu + vech(Sigma) +# 7a: 1/N * inverted expected information +# 7b: 1/N * inverted observed information +# 7c: 1/N * inverted first-order information +# 7d: sandwich acov + +# 10) additional functions +# - lav_mvnorm_missing_impute_pattern +# - lav_mvnorm_missing_estep # YR 09 Feb 2016: first version +# YR 19 Mar 2017: 10) # 1) likelihood @@ -23,14 +42,18 @@ Sigma = NULL, casewise = FALSE, pattern = TRUE, - Sinv.method = "eigen") { + Sinv.method = "eigen", + log2pi = TRUE, + minus.two = FALSE) { if(pattern) { llik <- lav_mvnorm_missing_llik_pattern(Y = Y, Mu = Mu, - Sigma = Sigma, Sinv.method = Sinv.method) + Sigma = Sigma, Sinv.method = Sinv.method, + log2pi = log2pi, minus.two = minus.two) } else { llik <- lav_mvnorm_missing_llik_casewise(Y = Y, Mu = Mu, - Sigma = Sigma, Sinv.method = Sinv.method) + Sigma = Sigma, Sinv.method = Sinv.method, + log2pi = log2pi, minus.two = minus.two) } if(casewise) { @@ -46,7 +69,9 @@ lav_mvnorm_missing_loglik_samplestats <- function(Yp = NULL, Mu = NULL, Sigma = NULL, - Sinv.method = "eigen") { + Sinv.method = "eigen", + log2pi = TRUE, + minus.two = FALSE) { LOG.2PI <- log(2*pi); pat.N <- length(Yp); P <- length(Yp[[1]]$var.idx) @@ -85,8 +110,16 @@ DIST[p] <- sum(sigma.inv * TT) * Yp[[p]]$freq } - # compute loglikelihoods per pattern - loglik <- sum(-(P.LOG.2PI + logdet + DIST)/2) + # loglikelihood all data + if(log2pi) { + loglik <- sum(-(P.LOG.2PI + logdet + DIST)/2) + } else { + loglik <- sum(-(logdet + DIST)/2) + } + + if(minus.two) { + loglik <- -2 * loglik + } loglik } @@ -97,7 +130,9 @@ lav_mvnorm_missing_llik_casewise <- function(Y = NULL, Mu = NULL, Sigma = NULL, - Sinv.method = "eigen") { + Sinv.method = "eigen", + log2pi = TRUE, + minus.two = FALSE) { P <- NCOL(Y); N <- NROW(Y); LOG.2PI <- log(2*pi); Mu <- as.numeric(Mu) @@ -138,20 +173,25 @@ if(length(na.idx) == P) next # invert Sigma for this pattern - if(length(na.idx) > 0L) { - sigma.inv <- lav_matrix_symmetric_inverse_update(S.inv = Sigma.inv, + sigma.inv <- lav_matrix_symmetric_inverse_update(S.inv = Sigma.inv, rm.idx = na.idx, logdet = TRUE, S.logdet = Sigma.logdet) - logdet[i] <- attr(sigma.inv, "logdet") - } else { - sigma.inv <- Sigma.inv - logdet[i] <- Sigma.logdet 
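# NOTE (editorial sketch, not part of the upstream diff): the identity behind
# lav_matrix_symmetric_inverse_update(), used here to obtain each pattern's
# sigma.inv from the single pre-computed Sigma.inv -- deleting the rows/cols
# in rm.idx corresponds to a Schur complement in the full inverse. A
# self-contained base-R check of the identity:
#
#   set.seed(1)
#   S <- crossprod(matrix(rnorm(16), 4, 4)) + diag(4)  # a pd 'Sigma'
#   S.inv <- solve(S)                                  # full inverse, once
#   rm.idx <- c(2L, 4L)                                # 'missing' variables
#   ok <- setdiff(1:4, rm.idx)                         # observed variables
#   upd <- S.inv[ok, ok] - S.inv[ok, rm.idx] %*%
#          solve(S.inv[rm.idx, rm.idx]) %*% S.inv[rm.idx, ok]
#   max(abs(upd - solve(S[ok, ok])))                   # effectively zero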
- } + logdet[i] <- attr(sigma.inv, "logdet") + # distance for this case DIST[i] <- sum(sigma.inv * crossprod(Yc[i, OBS[i,], drop = FALSE])) } # compute casewise loglikelihoods - llik <- -(P.LOG.2PI + logdet + DIST)/2 + if(log2pi) { + llik <- -(P.LOG.2PI + logdet + DIST)/2 + } else { + llik <- -(logdet + DIST)/2 + } + + # minus.two + if(minus.two) { + llik <- -2 * llik + } llik } @@ -161,7 +201,9 @@ Mp = NULL, Mu = NULL, Sigma = NULL, - Sinv.method = "eigen") { + Sinv.method = "eigen", + log2pi = TRUE, + minus.two = FALSE) { P <- NCOL(Y); N <- NROW(Y); LOG.2PI <- log(2*pi); Mu <- as.numeric(Mu) @@ -218,7 +260,16 @@ } # compute casewise loglikelihoods - llik <- -(P.LOG.2PI + logdet + DIST)/2 + if(log2pi) { + llik <- -(P.LOG.2PI + logdet + DIST)/2 + } else { + llik <- -(logdet + DIST)/2 + } + + # minus.two + if(minus.two) { + llik <- -2 * llik + } llik } @@ -312,7 +363,7 @@ Mp <- lav_data_missing_patterns(Y) } - # for each pattern, compute Yc %*% sigma.inv + # for each pattern for(p in seq_len(Mp$npatterns)) { # observed values for this pattern @@ -367,7 +418,7 @@ # dvechSigma dSigma <- matrix(0, P, P) - # for each pattern, compute Yc %*% sigma.inv + # for each pattern for(p in seq_len(pat.N)) { # observed variables for this pattern @@ -429,7 +480,7 @@ # dvechSigma dvechSigma <- numeric(P*(P+1)/2) - # for each pattern, compute Yc %*% sigma.inv + # for each pattern for(p in seq_len(pat.N)) { # observed variables for this pattern @@ -499,6 +550,9 @@ # for each pattern, compute sigma.inv for(p in seq_len(Mp$npatterns)) { + # observed values for this pattern + var.idx <- Mp$pat[p,] + # missing values for this pattern na.idx <- which(!var.idx) @@ -553,7 +607,7 @@ # SC SC <- matrix(as.numeric(NA), nrow = N, ncol = length(iSigma)) - # for each pattern, compute Yc %*% sigma.inv + # for each pattern for(p in seq_len(Mp$npatterns)) { # observed values for this pattern @@ -1018,3 +1072,216 @@ list(Abeta = Abeta, Bbeta = Bbeta) } + +# 6) inverted information h0 mu + vech(Sigma) + +# 6a: (unit) inverted expected information +# NOT USED: is not equal to solve(expected) +# (although it does converge to the same solution eventually) +# lav_mvnorm_missing_inverted_information_expected <- function(Y = NULL, +# Mp = NULL, +# Mu = NULL,# unused +# Sigma = NULL) { +# P <- NCOL(Y) +# +# # missing patterns +# if(is.null(Mp)) { +# Mp <- lav_data_missing_patterns(Y) +# } +# +# # N +# N <- sum(Mp$freq) # removed empty cases! 
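# NOTE (editorial sketch, not part of the upstream diff): a two-pattern toy
# check of the 'NOT USED' remark above -- the inverse of a frequency-weighted
# sum of information blocks is not the weighted sum of the blockwise
# inverses, so averaging Gamma.NT-type blocks per pattern does not invert the
# expected information:
#
#   set.seed(1)
#   A1 <- crossprod(matrix(rnorm(9), 3, 3)) + diag(3)  # block, pattern 1
#   A2 <- crossprod(matrix(rnorm(9), 3, 3)) + diag(3)  # block, pattern 2
#   w <- c(0.3, 0.7)                                   # pattern freq / N
#   max(abs(solve(w[1]*A1 + w[2]*A2) -
#           (w[1]*solve(A1) + w[2]*solve(A2))))        # clearly non-zero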
+# +# I11 <- matrix(0, P, P) +# I22 <- matrix(0, P*(P+1)/2, P*(P+1)/2) +# +# # for each pattern +# for(p in seq_len(Mp$npatterns)) { +# +# # observed variables +# var.idx <- Mp$pat[p,] +# +# sigma <- matrix(0, P, P) +# sigma[var.idx, var.idx] <- Sigma[var.idx, var.idx] +# sigma2 <- 2 * lav_matrix_duplication_ginv_pre_post(sigma %x% sigma) +# +# I11 <- I11 + Mp$freq[p] * sigma +# I22 <- I22 + Mp$freq[p] * sigma2 +# } +# +# lav_matrix_bdiag(I11, I22)/N +#} + +# 6b: / + +# 6c: / + + +# 7) ACOV h0 mu + vech(Sigma) + +# 7a: 1/N * inverted expected information + +# 7b: 1/N * inverted observed information + +# 7c: 1/N * inverted first-order information + +# 7d: sandwich acov + + + +# 10) other stuff + +# single imputation missing cells, under the normal model, pattern-based +lav_mvnorm_missing_impute_pattern <- function(Y = NULL, + Mp = NULL, + Mu = NULL, + Sigma = NULL, + Sigma.inv = NULL, + Sinv.method = "eigen") { + + Mu <- as.numeric(Mu) + + # complete data + Y.complete <- Y + + # missing patterns + if(is.null(Mp)) { + Mp <- lav_data_missing_patterns(Y) + } + + if(is.null(Sigma.inv)) { + # invert Sigma + Sigma.inv <- lav_matrix_symmetric_inverse(S = Sigma, logdet = FALSE, + Sinv.method = Sinv.method) + } + + # subtract Mu + Yc <- t( t(Y) - Mu ) + + # fill in data per pattern + for(p in seq_len(Mp$npatterns)) { + + # observed values for this pattern + var.idx <- Mp$pat[p,] + + # if complete, nothing to do + if(all(var.idx)) { + next + } + + # missing values for this pattern + na.idx <- which(!var.idx) + + # extract observed data for these (centered) cases + Oc <- Yc[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] + + # invert Sigma (Sigma_22, observed part only) for this pattern + Sigma_22.inv <- try(lav_matrix_symmetric_inverse_update(S.inv = + Sigma.inv, rm.idx = na.idx, logdet = FALSE), + silent = TRUE) + if(inherits(Sigma_22.inv, "try-error")) { + stop("lavaan ERROR: Sigma_22.inv cannot be inverted") + } + + # estimate missing values in this pattern + Sigma_12 <- Sigma[!var.idx, var.idx, drop=FALSE] + Y.missing <- t( Sigma_12 %*% Sigma_22.inv %*% t(Oc) + Mu[!var.idx] ) + + # complete data for this pattern + Y.complete[Mp$case.idx[[p]], !var.idx] <- Y.missing + } + + Y.complete +} + + +# E-step: expectations of sum, sum of squares, sum of crossproducts +# plus correction +lav_mvnorm_missing_estep <- function(Y = NULL, + Mp = NULL, + Mu = NULL, + Sigma = NULL, + Sigma.inv = NULL, + Sinv.method = "eigen") { + + P <- NCOL(Y); Mu <- as.numeric(Mu) + + # missing patterns + if(is.null(Mp)) { + Mp <- lav_data_missing_patterns(Y) + } + + if(is.null(Sigma.inv)) { + # invert Sigma + Sigma.inv <- lav_matrix_symmetric_inverse(S = Sigma, logdet = FALSE, + Sinv.method = Sinv.method) + } + + # T1, T2 + T1 <- numeric(P) + T2 <- matrix(0, P, P) + + # update T1 and T2 per pattern + for(p in seq_len(Mp$npatterns)) { + + # observed values for this pattern + var.idx <- Mp$pat[p,] + + # missing values for this pattern + na.idx <- which(!var.idx) + + # extract observed data + O <- Y[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] + + # if complete, just compute first and second moments + if(all(var.idx)) { + # complete pattern + T1 <- T1 + colSums(O) + T2 <- T2 + crossprod(O) + next + } + + # missing values for this pattern + na.idx <- which(!var.idx) + + # partition Sigma (1=missing, 2=complete) + Sigma_11 <- Sigma[!var.idx, !var.idx, drop=FALSE] + Sigma_12 <- Sigma[!var.idx, var.idx, drop=FALSE] + Sigma_21 <- Sigma[ var.idx, !var.idx, drop=FALSE] + + # invert Sigma (Sigma_22, observed part only) for this pattern + 
Sigma_22.inv <- try(lav_matrix_symmetric_inverse_update(S.inv = + Sigma.inv, rm.idx = na.idx, logdet = FALSE), + silent = TRUE) + if(inherits(Sigma_22.inv, "try-error")) { + stop("lavaan ERROR: Sigma_22.inv cannot be inverted") + } + + # estimate missing values in this pattern + Oc <- t( t(O) - Mu[var.idx]) + Y.missing <- t( Sigma_12 %*% Sigma_22.inv %*% t(Oc) + Mu[!var.idx] ) + + # complete data for this pattern + Y.complete <- matrix(0, Mp$freq[[p]], P) + Y.complete[, var.idx] <- O + Y.complete[,!var.idx] <- Y.missing + + # 1. SUM `completed' pattern + T1.pat <- colSums(Y.complete) + + # 2. CROSSPROD `completed' pattern + T2.pat <- crossprod(Y.complete) + + # correction for missing cells: conditional covariances + T2.p11 <- Sigma_11 - (Sigma_12 %*% Sigma_22.inv %*% Sigma_21) + T2.pat[!var.idx, !var.idx] <- + T2.pat[!var.idx, !var.idx] + (T2.p11 * Mp$freq[[p]]) + + # accumulate + T1 <- T1 + T1.pat + T2 <- T2 + T2.pat + } + + list(T1 = T1, T2 = T2) +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_mvnorm.R r-cran-lavaan-0.5.23.1097/R/lav_mvnorm.R --- r-cran-lavaan-0.5.22/R/lav_mvnorm.R 2016-03-27 18:55:45.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_mvnorm.R 2017-01-19 16:49:39.000000000 +0000 @@ -4,14 +4,75 @@ # 2) derivatives with respect to mu, Sigma, vech(Sigma) # 3) casewise scores with respect to mu, vech(Sigma), mu + vech(Sigma) # 4) hessian mu + vech(Sigma) -# 5) information h0 (restricted Sigma/mu) -# 5a: (unit) expected information of mu + vech(Sigma) h0 -# 5b: (unit) observed information h0 -# 5c: (unit) first.order information h0 - +# 5) information h0 mu + vech(Sigma) +# 5a: (unit) expected information +# 5b: (unit) observed information +# 5c: (unit) first.order information +# 6) inverted information h0 mu + vech(Sigma) +# 6a: (unit) inverted expected information +# 6b: / +# 6c: / +# 7) ACOV h0 mu + vech(Sigma) +# 7a: 1/N * inverted expected information +# 7b: 1/N * inverted observed information +# 7c: 1/N * inverted first-order information +# 7d: sandwich acov # YR 07 Feb 2016: first version # YR 24 Mar 2016: added firstorder information, hessian logl +# YR 19 Jan 2017: added lav_mvnorm_inverted_information_expected + +# 0. densities +lav_mvnorm_dmvnorm <- function(Y = NULL, + Mu = NULL, + Sigma = NULL, + Sigma.inv = NULL, + Sinv.method = "eigen", + log = TRUE) { + + if(is.matrix(Y)) { + if(is.null(Mu) && is.null(Sigma) && is.null(Sigma.inv)) { + out <- lav_mvnorm_loglik_data_z(Y = Y, casewise = TRUE) + } else { + out <- lav_mvnorm_loglik_data(Y = Y, Mu = Mu, Sigma = Sigma, + casewise = TRUE, + Sinv.method = Sinv.method) + } + } else { + # just one + P <- length(Y); LOG.2PI <- log(2 * pi) + + if(is.null(Mu) && is.null(Sigma) && is.null(Sigma.inv)) { + # mahalanobis distance + DIST <- sum(Y * Y) + out <- -(P * LOG.2PI + DIST)/2 + } else { + if(is.null(Sigma.inv)) { + Sigma.inv <- lav_matrix_symmetric_inverse(S = Sigma, + logdet = TRUE, Sinv.method = Sinv.method) + logdet <- attr(Sigma.inv, "logdet") + } else { + logdet <- attr(Sigma.inv, "logdet") + if(is.null(logdet)) { + # compute - ln|Sigma.inv| + ev <- eigen(Sigma.inv, symmetric = TRUE, only.values = TRUE) + logdet <- -1 * sum(log(ev$values)) + } + } + + # mahalanobis distance + Yc <- Y - Mu + DIST <- sum(Yc %*% Sigma.inv * Yc) + out <- -(P * LOG.2PI + logdet + DIST)/2 + } + } + + if(!log) { + out <- exp(out) + } + + out +} # 1. 
likelihood
@@ -99,6 +160,29 @@
     loglik
 }

+# 1c special case: Mu = 0, Sigma = I
+lav_mvnorm_loglik_data_z <- function(Y = NULL,
+                                     casewise = FALSE) {
+    P <- NCOL(Y); N <- NROW(Y); LOG.2PI <- log(2 * pi)
+
+    if(casewise) {
+        DIST <- rowSums(Y * Y)
+        loglik <- -(P * LOG.2PI + DIST)/2
+    } else {
+        sample.mean <- colMeans(Y)
+        sample.cov <- 1/N*crossprod(Y) - tcrossprod(sample.mean)
+
+        DIST1 <- sum(diag(sample.cov))
+        DIST2 <- sum(sample.mean * sample.mean)
+
+        loglik <- -N/2 * (P * LOG.2PI + DIST1 + DIST2)
+    }
+
+    loglik
+}
+
+
+
 # 2. Derivatives

@@ -353,7 +437,7 @@
     # sample statistics
     sample.mean <- colMeans(Y)
-    sample.cov <- cov(Y) * (N-1)/N
+    sample.cov <- 1/N*crossprod(Y) - tcrossprod(sample.mean)

     lav_mvnorm_information_observed_samplestats(sample.mean = sample.mean,
         sample.cov = sample.cov, Mu = Mu, Sigma = Sigma,
@@ -412,3 +496,43 @@
 }

+# 6: inverted information h0
+
+# 6a: inverted unit expected information h0 Mu and vech(Sigma)
+lav_mvnorm_inverted_information_expected <- function(Y = NULL, # unused!
+                                                     Mu = NULL, # unused!
+                                                     Sigma = NULL,
+                                                     meanstructure = TRUE) {
+
+    I22 <- 2 * lav_matrix_duplication_ginv_pre_post(Sigma %x% Sigma)
+
+    if(meanstructure) {
+        I11 <- Sigma
+        out <- lav_matrix_bdiag(I11, I22)
+    } else {
+        out <- I22
+    }
+
+    out
+}
+
+# 6b: inverted unit observed information h0
+
+# one could use the inverse of a partitioned matrix, but that does not
+# seem to help much... unless we can find an expression for solve(I22)
+
+# 6c: inverted unit first-order information h0
+# /
+
+
+# 7) ACOV h0 mu + vech(Sigma)
+# not implemented, as too trivial
+
+# 7a: 1/N * inverted expected information
+
+# 7b: 1/N * inverted observed information
+
+# 7c: 1/N * inverted first-order information
+
+# 7d: sandwich acov
+
diff -Nru r-cran-lavaan-0.5.22/R/lav_mvreg.R r-cran-lavaan-0.5.23.1097/R/lav_mvreg.R
--- r-cran-lavaan-0.5.22/R/lav_mvreg.R 2016-03-24 16:04:23.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/R/lav_mvreg.R 2017-01-14 18:58:37.000000000 +0000
@@ -14,11 +14,6 @@
                                Sinv.method = "eigen") {
     Q <- NCOL(Y); N <- NROW(Y)

-    # invert Sigma
-    Sigma.inv <- lav_matrix_symmetric_inverse(S = Sigma, logdet = TRUE,
-                                              Sinv.method = Sinv.method)
-    logdet <- attr(Sigma.inv, "logdet")
-
     if(casewise) {
         LOG.2PI <- log(2 * pi)

@@ -44,6 +39,7 @@
         # invert Sigma
         Sigma.inv <- lav_matrix_symmetric_inverse(S = Sigma, logdet = TRUE,
                                                   Sinv.method = Sinv.method)
+        logdet <- attr(Sigma.inv, "logdet")

         RES <- Y - X %*% Beta
         # TOTAL <- TR( (Y - X%*%Beta) %*% Sigma.inv %*% t(Y - X%*%Beta) )
@@ -86,9 +82,9 @@
     # tr( Sigma^{-1} (B-beta)' X'X (B-beta) )
     Diff <- sample.res.beta - Beta
-    DIST2 <- sum(Sigma.inv * crossprod(Diff, sample.XX) %*% Diff)
+    DIST2 <- sum(Sigma.inv * crossprod(Diff, (1/N)*sample.XX) %*% Diff)

-    loglik <- -(1/2) * (N*Q*log(2*pi) + N*logdet + N*DIST1 + DIST2)
+    loglik <- -(N/2) * (Q*log(2*pi) + logdet + DIST1 + DIST2)

     loglik
 }

@@ -234,7 +230,7 @@
     Z <- RES[,idx1] * RES[,idx2]

     # subtract isigma from each row
-    SC <- sweep(Z, 2L, STATS = isigma, FUN = "-")
+    SC <- t( t(Z) - isigma )

     # adjust for vech (and avoiding the 1/2 factor)
     SC[,lav_matrix_diagh_idx(Q)] <- SC[,lav_matrix_diagh_idx(Q)] / 2
@@ -276,7 +272,7 @@
     Z <- RES[,idx1] * RES[,idx2]

     # subtract isigma from each row
-    SC <- sweep(Z, 2L, STATS = isigma, FUN = "-")
+    SC <- t( t(Z) - isigma )

     # adjust for vech (and avoiding the 1/2 factor)
     SC[,lav_matrix_diagh_idx(Q)] <- SC[,lav_matrix_diagh_idx(Q)] / 2
@@ -316,14 +312,14 @@
     }

     RES <- Y - X %*% Beta
-    W.tilde <- crossprod(RES)
+    W.tilde <- 1/N * crossprod(RES)

     H11 <- Sigma.inv %x% ((1/N) *
crossprod(X)) H21 <- lav_matrix_duplication_pre( Sigma.inv %x% (Sigma.inv %*% ((1/N) * crossprod(RES, X))) ) H12 <- t(H21) - AAA <- Sigma.inv %*% ((2/N)*W.tilde - Sigma) %*% Sigma.inv + AAA <- Sigma.inv %*% (2*W.tilde - Sigma) %*% Sigma.inv H22 <- (1/2) * lav_matrix_duplication_pre_post(Sigma.inv %x% AAA) H <- -N * rbind( cbind(H11, H12), diff -Nru r-cran-lavaan-0.5.22/R/lav_object_generate.R r-cran-lavaan-0.5.23.1097/R/lav_object_generate.R --- r-cran-lavaan-0.5.22/R/lav_object_generate.R 2015-09-04 11:54:24.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_object_generate.R 2017-02-21 11:01:47.000000000 +0000 @@ -31,19 +31,10 @@ lavoptions$do.fit <- TRUE # verbose? - if(verbose) { - lavoptions$verbose <- TRUE - } else { - lavoptions$verbose <- FALSE - } + lavoptions$verbose <- verbose # warn? - if(warn) { - lavoptions$warn <- TRUE - } else { - lavoptions$warn <- FALSE - } - + lavoptions$warn <- warn # needed? if(any(lavpartable$op == "~1")) lavoptions$meanstructure <- TRUE @@ -116,7 +107,7 @@ do.fit = FALSE) { # partable original model - partable <- object@ParTable[c("lhs","op","rhs","group","free", + partable <- object@ParTable[c("lhs","op","rhs","block","free", "exo","label","plabel")] # do we need 'exo'? if(all.free) { partable$user <- rep(1L, length(partable$lhs)) @@ -139,8 +130,9 @@ !is.null(add$rhs)) ADD <- add } else if(is.character(add)) { - ADD <- lavaanify(add, ngroups = object@Data@ngroups) - ADD <- ADD[,c("lhs","op","rhs","group","user","label")] + ngroups <- lav_partable_ngroups(partable) + ADD <- lavaanify(add, ngroups = ngroups) + ADD <- ADD[,c("lhs","op","rhs","block","user","label")] remove.idx <- which(ADD$user == 0) if(length(remove.idx) > 0L) { ADD <- ADD[-remove.idx,] @@ -171,24 +163,18 @@ lavoptions <- object@Options # verbose? - if(verbose) { - lavoptions$verbose <- TRUE - } else { - lavoptions$verbose <- FALSE - } + lavoptions$verbose <- verbose # warn? - if(warn) { - lavoptions$warn <- TRUE - } else { - lavoptions$warn <- FALSE - } + lavoptions$warn <- warn + + # do.fit? + lavoptions$do.fit <- do.fit # needed? if(any(LIST$op == "~1")) lavoptions$meanstructure <- TRUE FIT <- lavaan(LIST, - do.fit = do.fit, slotOptions = lavoptions, slotSampleStats = object@SampleStats, slotData = object@Data, diff -Nru r-cran-lavaan-0.5.22/R/lav_object_inspect.R r-cran-lavaan-0.5.23.1097/R/lav_object_inspect.R --- r-cran-lavaan-0.5.22/R/lav_object_inspect.R 2016-09-24 13:46:15.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_object_inspect.R 2017-02-21 16:31:09.000000000 +0000 @@ -1,39 +1,38 @@ # inspect a fitted lavaan object # backward compatibility -- wrapper around lavInspect -setMethod("inspect", "lavaan", -function(object, what = "free") { - lavInspect(lavobject = object, - what = what, - add.labels = TRUE, - add.class = TRUE, - drop.list.single.group = TRUE) -}) +inspect.lavaan <- function(object, what = "free", ...) { + lavInspect.lavaan(object = object, + what = what, + add.labels = TRUE, + add.class = TRUE, + drop.list.single.group = TRUE) +} # the `tech' version: no labels, full matrices, ... 
for further processing -lavTech <- function(lavobject, - what = "free", - add.labels = FALSE, - add.class = FALSE, - list.by.group = FALSE, - drop.list.single.group = FALSE) { - - lavInspect(lavobject = lavobject, what = what, - add.labels = add.labels, add.class = add.class, - list.by.group = list.by.group, - drop.list.single.group = drop.list.single.group) +lavTech.lavaan <- function(object, + what = "free", + add.labels = FALSE, + add.class = FALSE, + list.by.group = FALSE, + drop.list.single.group = FALSE) { + + lavInspect.lavaan(object, what = what, + add.labels = add.labels, add.class = add.class, + list.by.group = list.by.group, + drop.list.single.group = drop.list.single.group) } # the `user' version: with defaults for display only -lavInspect <- function(lavobject, - what = "free", - add.labels = TRUE, - add.class = TRUE, - list.by.group = TRUE, - drop.list.single.group = TRUE) { +lavInspect.lavaan <- function(object, + what = "free", + add.labels = TRUE, + add.class = TRUE, + list.by.group = TRUE, + drop.list.single.group = TRUE) { - # lavobject must inherit from class lavaan - stopifnot(inherits(lavobject, "lavaan")) + # object must inherit from class lavaan + stopifnot(inherits(object, "lavaan")) # only a single argument if(length(what) > 1) { @@ -46,57 +45,60 @@ #### model matrices, with different contents #### if(what == "free") { - lav_object_inspect_modelmatrices(lavobject, what = "free", + lav_object_inspect_modelmatrices(object, what = "free", type = "free", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) + } else if(what == "impute" || + what == "imputed") { # just to ease the transition for semTools! + object@imputed } else if(what == "partable" || what == "user") { - lav_object_inspect_modelmatrices(lavobject, what = "free", + lav_object_inspect_modelmatrices(object, what = "free", type="partable", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "se" || what == "std.err" || what == "standard.errors") { - lav_object_inspect_modelmatrices(lavobject, what = "se", + lav_object_inspect_modelmatrices(object, what = "se", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "start" || what == "starting.values") { - lav_object_inspect_modelmatrices(lavobject, what = "start", + lav_object_inspect_modelmatrices(object, what = "start", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "est" || what == "estimates" || what == "coef" || what == "coefficients" || what == "x") { - lav_object_inspect_modelmatrices(lavobject, what = "est", + lav_object_inspect_modelmatrices(object, what = "est", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, #list.by.group = FALSE, for semTools only drop.list.single.group = drop.list.single.group) } else if(what == "dx.free") { - lav_object_inspect_modelmatrices(lavobject, what = "dx.free", + lav_object_inspect_modelmatrices(object, what = "dx.free", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "dx.all") { - lav_object_inspect_modelmatrices(lavobject, what = "dx.all", + lav_object_inspect_modelmatrices(object, what = "dx.all", add.labels = add.labels, add.class = 
add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "std" || what == "std.all" || what == "standardized") { - lav_object_inspect_modelmatrices(lavobject, what = "std.all", + lav_object_inspect_modelmatrices(object, what = "std.all", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "std.lv") { - lav_object_inspect_modelmatrices(lavobject, what = "std.lv", + lav_object_inspect_modelmatrices(object, what = "std.lv", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if(what == "std.nox") { - lav_object_inspect_modelmatrices(lavobject, what = "std.nox", + lav_object_inspect_modelmatrices(object, what = "std.nox", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) @@ -104,21 +106,21 @@ #### parameter table #### } else if(what == "list") { - parTable(lavobject) + parTable(object) #### fit indices #### } else if(what == "fit" || what == "fitmeasures" || what == "fit.measures" || what == "fit.indices") { - fitMeasures(lavobject) + fitMeasures(object) #### modification indices #### } else if(what == "mi" || what == "modindices" || what == "modification.indices") { - modificationIndices(lavobject) + modificationIndices(object) #### sample statistics ##### @@ -130,25 +132,25 @@ what == "samp" || what == "sample" || what == "samplestatistics") { - lav_object_inspect_sampstat(lavobject, h1 = FALSE, + lav_object_inspect_sampstat(object, h1 = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "h1" || what == "missing.h1" || what == "sampstat.h1") { - lav_object_inspect_sampstat(lavobject, h1 = TRUE, + lav_object_inspect_sampstat(object, h1 = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### wls.est - wls.obs - wls.v #### } else if(what == "wls.est") { - lav_object_inspect_wls_est(lavobject, + lav_object_inspect_wls_est(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "wls.obs") { - lav_object_inspect_wls_obs(lavobject, + lav_object_inspect_wls_obs(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "wls.v") { - lav_object_inspect_wls_v(lavobject, + lav_object_inspect_wls_v(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) @@ -156,108 +158,112 @@ #### data + missingness #### } else if(what == "data") { - lav_object_inspect_data(lavobject, + lav_object_inspect_data(object, drop.list.single.group = drop.list.single.group) } else if(what == "case.idx") { - lav_object_inspect_case_idx(lavobject, + lav_object_inspect_case_idx(object, drop.list.single.group = drop.list.single.group) } else if(what == "ngroups") { - lavobject@Data@ngroups + object@Data@ngroups } else if(what == "group") { - lavobject@Data@group + object@Data@group + } else if(what == "cluster") { + object@Data@cluster + } else if(what == "ordered") { + object@Data@ordered } else if(what == "group.label") { - lavobject@Data@group.label + object@Data@group.label } else if(what == "nobs") { - unlist( lavobject@Data@nobs ) + unlist( object@Data@nobs ) } else if(what == "norig") { - unlist( lavobject@Data@norig ) + 
unlist( object@Data@norig ) } else if(what == "ntotal") { - sum(unlist( lavobject@Data@nobs )) + sum(unlist( object@Data@nobs )) } else if(what == "coverage") { - lav_object_inspect_missing_coverage(lavobject, + lav_object_inspect_missing_coverage(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what %in% c("patterns", "pattern")) { - lav_object_inspect_missing_patterns(lavobject, + lav_object_inspect_missing_patterns(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "empty.idx") { - lav_object_inspect_empty_idx(lavobject, + lav_object_inspect_empty_idx(object, drop.list.single.group = drop.list.single.group) #### rsquare #### } else if(what == "rsquare" || what == "r-square" || what == "r2") { - lav_object_inspect_rsquare(lavobject, + lav_object_inspect_rsquare(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### model-implied sample statistics #### } else if(what == "implied" || what == "fitted") { - lav_object_inspect_implied(lavobject, + lav_object_inspect_implied(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "resid" || what == "res" || what == "residual" || what == "residuals") { - lav_object_inspect_residuals(lavobject, h1 = TRUE, + lav_object_inspect_residuals(object, h1 = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "cov.lv" || what == "veta") { - lav_object_inspect_cov_lv(lavobject, + lav_object_inspect_cov_lv(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "cor.lv") { - lav_object_inspect_cov_lv(lavobject, + lav_object_inspect_cov_lv(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "mean.lv" || what == "eeta") { - lav_object_inspect_mean_lv(lavobject, + lav_object_inspect_mean_lv(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "cov.all") { - lav_object_inspect_cov_all(lavobject, + lav_object_inspect_cov_all(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "cor.all") { - lav_object_inspect_cov_all(lavobject, + lav_object_inspect_cov_all(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "cov.ov" || what == "sigma" || what == "sigma.hat") { - lav_object_inspect_cov_ov(lavobject, + lav_object_inspect_cov_ov(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "cor.ov") { - lav_object_inspect_cov_ov(lavobject, + lav_object_inspect_cov_ov(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "mean.ov" || what == "mu" || what == "mu.hat") { - lav_object_inspect_mean_ov(lavobject, + lav_object_inspect_mean_ov(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "th" || what == "thresholds") { - 
lav_object_inspect_th(lavobject, + lav_object_inspect_th(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "vy") { - lav_object_inspect_vy(lavobject, + lav_object_inspect_vy(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### specific model matrices? #### } else if(what == "theta" || what == "theta.cov") { - lav_object_inspect_theta(lavobject, correlation.metric = FALSE, + lav_object_inspect_theta(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if(what == "theta.cor") { - lav_object_inspect_theta(lavobject, correlation.metric = TRUE, + lav_object_inspect_theta(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) @@ -265,157 +271,163 @@ #### convergence, meanstructure, categorical #### } else if(what == "converged") { - lavobject@optim$converged + object@optim$converged } else if(what == "iterations" || what == "iter" || what == "niter") { - lavobject@optim$iterations + object@optim$iterations } else if(what == "meanstructure") { - lavobject@Model@meanstructure + object@Model@meanstructure } else if(what == "categorical") { - lavobject@Model@categorical + object@Model@categorical } else if(what == "fixed.x") { - lavobject@Model@fixed.x + object@Model@fixed.x } else if(what == "parameterization") { - lavobject@Model@parameterization + object@Model@parameterization #### NACOV samplestats #### } else if(what == "gamma") { - lav_object_inspect_sampstat_gamma(lavobject, + lav_object_inspect_sampstat_gamma(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### gradient, Hessian, information, first.order, vcov #### } else if(what == "gradient") { - lav_object_inspect_gradient(lavobject, + lav_object_inspect_gradient(object, add.labels = add.labels, add.class = add.class) } else if(what == "hessian") { - lav_object_inspect_hessian(lavobject, + lav_object_inspect_hessian(object, add.labels = add.labels, add.class = add.class) } else if(what == "information") { - lav_object_inspect_information(lavobject, information = "default", + lav_object_inspect_information(object, information = "default", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "information.expected") { - lav_object_inspect_information(lavobject, information = "expected", + lav_object_inspect_information(object, information = "expected", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "information.observed") { - lav_object_inspect_information(lavobject, information = "observed", + lav_object_inspect_information(object, information = "observed", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "information.first.order" || what == "first.order") { - lav_object_inspect_information(lavobject, information = "first.order", + lav_object_inspect_information(object, information = "first.order", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "augmented.information") { - lav_object_inspect_information(lavobject, information = "default", + lav_object_inspect_information(object, information = "default", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = 
add.class) } else if(what == "augmented.information.expected") { - lav_object_inspect_information(lavobject, information = "expected", + lav_object_inspect_information(object, information = "expected", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "augmented.information.observed") { - lav_object_inspect_information(lavobject, information = "observed", + lav_object_inspect_information(object, information = "observed", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "augmented.information.first.order" || what == "augmented.first.order") { - lav_object_inspect_information(lavobject, information = "first.order", + lav_object_inspect_information(object, information = "first.order", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "inverted.information") { - lav_object_inspect_information(lavobject, information = "default", + lav_object_inspect_information(object, information = "default", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if(what == "inverted.information.expected") { - lav_object_inspect_information(lavobject, information = "expected", + lav_object_inspect_information(object, information = "expected", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if(what == "inverted.information.observed") { - lav_object_inspect_information(lavobject, information = "observed", + lav_object_inspect_information(object, information = "observed", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if(what == "inverted.information.first.order" || what == "inverted.first.order") { - lav_object_inspect_information(lavobject, information = "first.order", + lav_object_inspect_information(object, information = "first.order", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if(what == "vcov") { - lav_object_inspect_vcov(lavobject, + lav_object_inspect_vcov(object, standardized = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "vcov.std.all" || what == "vcov.standardized" || what == "vcov.std") { - lav_object_inspect_vcov(lavobject, + lav_object_inspect_vcov(object, standardized = TRUE, type = "std.all", add.labels = add.labels, add.class = add.class) } else if(what == "vcov.std.lv") { - lav_object_inspect_vcov(lavobject, + lav_object_inspect_vcov(object, standardized = TRUE, type = "std.lv", add.labels = add.labels, add.class = add.class) } else if(what == "vcov.std.nox") { - lav_object_inspect_vcov(lavobject, + lav_object_inspect_vcov(object, standardized = TRUE, type = "std.nox", add.labels = add.labels, add.class = add.class) } else if(what == "vcov.def") { - lav_object_inspect_vcov_def(lavobject, + lav_object_inspect_vcov_def(object, standardized = FALSE, add.labels = add.labels, add.class = add.class) } else if(what == "vcov.def.std.all" || what == "vcov.def.standardized" || what == "vcov.def.std") { - lav_object_inspect_vcov_def(lavobject, + lav_object_inspect_vcov_def(object, standardized = TRUE, type = "std.all", add.labels = add.labels, add.class = add.class) } else if(what == "vcov.def.std.lv") { - lav_object_inspect_vcov_def(lavobject, + lav_object_inspect_vcov_def(object, standardized = TRUE, type = "std.lv", add.labels = add.labels, add.class = add.class) } else if(what == "vcov.def.std.nox") { - lav_object_inspect_vcov_def(lavobject, + 
lav_object_inspect_vcov_def(object,
            standardized = TRUE, type = "std.nox",
            add.labels = add.labels, add.class = add.class)

    } else if(what == "ugamma" || what == "ug" || what == "u.gamma") {
-        lav_object_inspect_UGamma(lavobject,
+        lav_object_inspect_UGamma(object,
            add.labels = add.labels, add.class = add.class)

    #### jacobians ####
    } else if(what == "delta") {
-        lav_object_inspect_delta(lavobject,
+        lav_object_inspect_delta(object,
            add.labels = add.labels, add.class = add.class,
            drop.list.single.group = drop.list.single.group)

    # post-checking
    } else if(what == "post.check" || what == "post") {
-        lav_object_post_check(lavobject)
+        lav_object_post_check(object)

    # options
    } else if(what == "options" || what == "lavoptions") {
-        lavobject@Options
+        object@Options

    # call
    } else if(what == "call") {
-        as.list( lavobject@call )
+        as.list( object@call )

    # timing
    } else if(what == "timing") {
-        lavobject@timing
+        object@timing

    # optim
    } else if(what == "optim") {
-        lavobject@optim
+        object@optim

    # test
    } else if(what == "test") {
-        lavobject@test
+        object@test
+
+    # zero cell tables
+    } else if(what == "zero.cell.tables") {
+        lav_object_inspect_zero_cell_tables(object,
+            add.labels = add.labels, add.class = add.class,
+            drop.list.single.group = drop.list.single.group)

    #### not found ####
    } else {
@@ -427,125 +439,132 @@

# helper functions (mostly to deal with older 'object' that may have
# been saved somewhere)
-lav_object_inspect_est <- function(lavobject) {
+lav_object_inspect_est <- function(object) {

-    # from 0.5-19, they are in the partable
-    if(!is.null(lavobject@ParTable$est)) {
-        OUT <- lavobject@ParTable$est
-    } else if("Fit" %in% slotNames(lavobject)) {
-        # in < 0.5-19, we should look in @Fit@est
-        OUT <- lavobject@Fit@est
+    if(class(object) == "lavaan") {
+        # from 0.5-19, they are in the partable
+        if(!is.null(object@ParTable$est)) {
+            OUT <- object@ParTable$est
+        } else if("Fit" %in% slotNames(object)) {
+            # in < 0.5-19, we should look in @Fit@est
+            OUT <- object@Fit@est
+        } else {
+            PT <- parTable(object)
+            OUT <- rep(as.numeric(NA), length(PT$lhs))
+        }
    } else {
-        PT <- parTable(lavobject)
-        OUT <- rep(as.numeric(NA), length(PT$lhs))
+        # try coef()
+        OUT <- coef(object, type = "user")
+        if(is.matrix(OUT)) {
+            # lavaanList?
+            OUT <- rowMeans(OUT)
+        }
    }

    OUT
}

-lav_object_inspect_se <- function(lavobject) {
+lav_object_inspect_se <- function(object) {

    # from 0.5-19, they are in the partable
-    if(!is.null(lavobject@ParTable$se)) {
-        OUT <- lavobject@ParTable$se
-    } else if("Fit" %in% slotNames(lavobject)) {
+    if(!is.null(object@ParTable$se)) {
+        OUT <- object@ParTable$se
+    } else if("Fit" %in% slotNames(object)) {
        # in < 0.5-19, we should look in @Fit@se
-        OUT <- lavobject@Fit@se
+        OUT <- object@Fit@se
    } else {
-        PT <- parTable(lavobject)
+        PT <- parTable(object)
        OUT <- rep(as.numeric(NA), length(PT$lhs))
    }

    OUT
}

-lav_object_inspect_start <- function(lavobject) {
+lav_object_inspect_start <- function(object) {

    # from 0.5-19, they are in the partable
-    if(!is.null(lavobject@ParTable$start)) {
-        OUT <- lavobject@ParTable$start
+    if(!is.null(object@ParTable$start)) {
+        OUT <- object@ParTable$start
    } else {
        # in < 0.5-19, we should look in @Fit@start
-        OUT <- lavobject@Fit@start
+        OUT <- object@Fit@start
    }

    OUT
}

-lav_object_inspect_boot <- function(lavobject) {
+lav_object_inspect_boot <- function(object) {

    # from 0.5-19,
they are in a separate slot - tmp <- try(slot(lavobject,"boot"), silent = TRUE) + tmp <- try(slot(object,"boot"), silent = TRUE) if(inherits(tmp, "try-error")) { # older version of object? - est <- lav_object_inspect_est(lavobject) + est <- lav_object_inspect_est(object) BOOT <- attr(est, "BOOT.COEF") } else { # 0.5-19 way - BOOT <- lavobject@boot$coef + BOOT <- object@boot$coef } BOOT } -lav_object_inspect_modelmatrices <- function(lavobject, what = "free", +lav_object_inspect_modelmatrices <- function(object, what = "free", type = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { - GLIST <- lavobject@Model@GLIST + GLIST <- object@Model@GLIST if(what == "dx.free") { - DX <- lav_model_gradient(lavmodel = lavobject@Model, + DX <- lav_model_gradient(lavmodel = object@Model, GLIST = NULL, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - lavcache = lavobject@Cache, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, type = "free", - estimator = lavobject@Options$estimator, verbose = FALSE, forcePD = TRUE, group.weight = TRUE, Delta = NULL) } else if(what == "dx.all") { - GLIST <- lav_model_gradient(lavmodel = lavobject@Model, + GLIST <- lav_model_gradient(lavmodel = object@Model, GLIST = NULL, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - lavcache = lavobject@Cache, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, type = "allofthem", - estimator = lavobject@Options$estimator, verbose = FALSE, forcePD = TRUE, group.weight = TRUE, Delta = NULL) - names(GLIST) <- names(lavobject@Model@GLIST) + names(GLIST) <- names(object@Model@GLIST) } else if(what == "std.all") { - STD <- standardize.est.all(lavobject) + STD <- standardize.est.all(object) } else if(what == "std.lv") { - STD <- standardize.est.lv(lavobject) + STD <- standardize.est.lv(object) } else if(what == "std.nox") { - STD <- standardize.est.all.nox(lavobject) + STD <- standardize.est.all.nox(object) } for(mm in 1:length(GLIST)) { if(add.labels) { - dimnames(GLIST[[mm]]) <- lavobject@Model@dimNames[[mm]] + dimnames(GLIST[[mm]]) <- object@Model@dimNames[[mm]] } if(what == "free") { # fill in free parameter counts if(type == "free") { - m.el.idx <- lavobject@Model@m.free.idx[[mm]] - x.el.idx <- lavobject@Model@x.free.idx[[mm]] + m.el.idx <- object@Model@m.free.idx[[mm]] + x.el.idx <- object@Model@x.free.idx[[mm]] #} else if(type == "unco") { - # m.el.idx <- lavobject@Model@m.unco.idx[[mm]] - # x.el.idx <- lavobject@Model@x.unco.idx[[mm]] + # m.el.idx <- object@Model@m.unco.idx[[mm]] + # x.el.idx <- object@Model@x.unco.idx[[mm]] } else if(type == "partable") { - m.el.idx <- lavobject@Model@m.user.idx[[mm]] - x.el.idx <- lavobject@Model@x.user.idx[[mm]] + m.el.idx <- object@Model@m.user.idx[[mm]] + x.el.idx <- object@Model@x.user.idx[[mm]] } else { stop("lavaan ERROR: unknown type argument:", type, ) } @@ -554,40 +573,40 @@ GLIST[[mm]][m.el.idx] <- x.el.idx } else if(what == "se") { # fill in standard errors - m.user.idx <- lavobject@Model@m.user.idx[[mm]] - x.user.idx <- lavobject@Model@x.user.idx[[mm]] - SE <- lav_object_inspect_se(lavobject) + m.user.idx <- object@Model@m.user.idx[[mm]] + x.user.idx <- object@Model@x.user.idx[[mm]] + SE <- lav_object_inspect_se(object) # erase everything GLIST[[mm]][,] <- 0.0 GLIST[[mm]][m.user.idx] <- SE[x.user.idx] } else if(what == "start") { # fill in starting values - m.user.idx <- lavobject@Model@m.user.idx[[mm]] - 
x.user.idx <- lavobject@Model@x.user.idx[[mm]] - START <- lav_object_inspect_start(lavobject) + m.user.idx <- object@Model@m.user.idx[[mm]] + x.user.idx <- object@Model@x.user.idx[[mm]] + START <- lav_object_inspect_start(object) GLIST[[mm]][m.user.idx] <- START[x.user.idx] } else if(what == "est") { # fill in estimated parameter values - m.user.idx <- lavobject@Model@m.user.idx[[mm]] - x.user.idx <- lavobject@Model@x.user.idx[[mm]] - EST <- lav_object_inspect_est(lavobject) + m.user.idx <- object@Model@m.user.idx[[mm]] + x.user.idx <- object@Model@x.user.idx[[mm]] + EST <- lav_object_inspect_est(object) GLIST[[mm]][m.user.idx] <- EST[x.user.idx] } else if(what == "dx.free") { # fill in derivatives free parameters - m.el.idx <- lavobject@Model@m.free.idx[[mm]] - x.el.idx <- lavobject@Model@x.free.idx[[mm]] + m.el.idx <- object@Model@m.free.idx[[mm]] + x.el.idx <- object@Model@x.free.idx[[mm]] # erase everything GLIST[[mm]][,] <- 0.0 GLIST[[mm]][m.el.idx] <- DX[x.el.idx] } else if(what %in% c("std.all", "std.lv", "std.nox")) { - m.user.idx <- lavobject@Model@m.user.idx[[mm]] - x.user.idx <- lavobject@Model@x.user.idx[[mm]] + m.user.idx <- object@Model@m.user.idx[[mm]] + x.user.idx <- object@Model@x.user.idx[[mm]] GLIST[[mm]][m.user.idx] <- STD[x.user.idx] } # class if(add.class) { - if(lavobject@Model@isSymmetric[mm]) { + if(object@Model@isSymmetric[mm]) { class(GLIST[[mm]]) <- c("lavaan.matrix.symmetric", "matrix") } else { class(GLIST[[mm]]) <- c("lavaan.matrix", "matrix") @@ -597,9 +616,9 @@ # try to reflect `equality constraints' con.flag <- FALSE - if(what == "free" && lavobject@Model@eq.constraints) { + if(what == "free" && object@Model@eq.constraints) { # extract constraints from parameter table - PT <- parTable(lavobject) + PT <- parTable(object) CON <- PT[PT$op %in% c("==","<",">") ,c("lhs","op","rhs")] rownames(CON) <- NULL @@ -648,8 +667,8 @@ # should we group them per group? 
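# NOTE (editorial usage sketch, not part of the upstream diff): for a
# multi-group fit, the packing below returns one list of model matrices per
# group, named by the group labels, e.g.
#
#   library(lavaan)
#   fit <- cfa('visual =~ x1 + x2 + x3',
#              data = HolzingerSwineford1939, group = "school")
#   GL <- lavTech(fit, "free", list.by.group = TRUE)
#   names(GL)   # "Pasteur" "Grant-White"
#
# the packing itself: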
if(list.by.group) { - lavsamplestats <- lavobject@SampleStats - lavmodel <- lavobject@Model + lavsamplestats <- object@SampleStats + lavmodel <- object@Model nmat <- lavmodel@nmat OUT <- vector("list", length = lavsamplestats@ngroups) @@ -664,8 +683,8 @@ if(lavsamplestats@ngroups == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } } else { @@ -693,19 +712,19 @@ # for ML, we have both joint and residual cov/var/...; but for # categorical = TRUE, we only have residual cov/var...; so, we # only return residual in both cases, whenever residual -lav_object_inspect_sampstat <- function(lavobject, h1 = FALSE, +lav_object_inspect_sampstat <- function(object, h1 = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups - ov.names <- lavobject@pta$vnames$ov - ov.names.res <- lavobject@pta$vnames$ov.nox - ov.names.x <- lavobject@pta$vnames$ov.x - lavsamplestats <- lavobject@SampleStats + G <- object@Data@ngroups + ov.names <- object@pta$vnames$ov + ov.names.res <- object@pta$vnames$ov.nox + ov.names.x <- object@pta$vnames$ov.x + lavsamplestats <- object@SampleStats OUT <- vector("list", length=G) for(g in 1:G) { - if(!lavobject@Model@conditional.x) { + if(!object@Model@conditional.x) { # covariance matrix if(h1 && !is.null(lavsamplestats@missing.h1[[g]])) { @@ -735,14 +754,14 @@ } # thresholds - if(lavobject@Model@categorical) { + if(object@Model@categorical) { OUT[[g]]$th <- as.numeric(lavsamplestats@th[[g]]) - if(length(lavobject@Model@num.idx[[g]]) > 0L) { - NUM.idx <- which(lavobject@Model@th.idx[[g]] == 0) + if(length(object@Model@num.idx[[g]]) > 0L) { + NUM.idx <- which(object@Model@th.idx[[g]] == 0) OUT[[g]]$th <- OUT[[g]]$th[ -NUM.idx ] } if(add.labels) { - names(OUT[[g]]$th) <- lavobject@pta$vnames$th[[g]] + names(OUT[[g]]$th) <- object@pta$vnames$th[[g]] } if(add.class) { class(OUT[[g]]$th) <- c("lavaan.vector", "numeric") @@ -764,7 +783,7 @@ } # intercepts - if(lavobject@Model@conditional.x) { + if(object@Model@conditional.x) { OUT[[g]]$res.int <- as.numeric(lavsamplestats@res.int[[g]]) if(add.labels) { names(OUT[[g]]$res.int) <- ov.names.res[[g]] @@ -775,14 +794,14 @@ } # thresholds - if(lavobject@Model@categorical) { + if(object@Model@categorical) { OUT[[g]]$res.th <- as.numeric(lavsamplestats@res.th[[g]]) - if(length(lavobject@Model@num.idx[[g]]) > 0L) { - NUM.idx <- which(lavobject@Model@th.idx[[g]] == 0) + if(length(object@Model@num.idx[[g]]) > 0L) { + NUM.idx <- which(object@Model@th.idx[[g]] == 0) OUT[[g]]$res.th <- OUT[[g]]$res.th[ -NUM.idx ] } if(add.labels) { - names(OUT[[g]]$res.th) <- lavobject@pta$vnames$th[[g]] + names(OUT[[g]]$res.th) <- object@pta$vnames$th[[g]] } if(add.class) { class(OUT[[g]]$res.th) <- c("lavaan.vector", "numeric") @@ -790,7 +809,7 @@ } # slopes - if(lavobject@Model@nexo > 0L) { + if(object@Model@nexo > 0L) { OUT[[g]]$res.slopes <- lavsamplestats@res.slopes[[g]] if(add.labels) { rownames(OUT[[g]]$res.slopes) <- ov.names.res[[g]] @@ -802,7 +821,7 @@ } # cov.x - if(lavobject@Model@nexo > 0L) { + if(object@Model@nexo > 0L) { OUT[[g]]$cov.x <- lavsamplestats@cov.x[[g]] if(add.labels) { rownames(OUT[[g]]$cov.x) <- ov.names.x[[g]] @@ -817,7 +836,7 @@ } # conditional.x # stochastic weights - if(lavobject@Model@group.w.free) { + if(object@Model@group.w.free) { # to be consistent with the 'implied' values, # 
transform so group.w is the 'log(group.freq)' OUT[[g]]$group.w <- @@ -834,8 +853,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -843,40 +862,40 @@ } -lav_object_inspect_data <- function(lavobject, add.labels = FALSE, +lav_object_inspect_data <- function(object, add.labels = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups - OUT <- lavobject@Data@X + G <- object@Data@ngroups + OUT <- object@Data@X if(add.labels) { for(g in 1:G) { - colnames(OUT[[g]]) <- lavobject@Data@ov.names[[g]] + colnames(OUT[[g]]) <- object@Data@ov.names[[g]] } } if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_case_idx <- function(lavobject, +lav_object_inspect_case_idx <- function(object, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups - OUT <- lavobject@Data@case.idx + G <- object@Data@ngroups + OUT <- object@Data@case.idx if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -884,17 +903,17 @@ } -lav_object_inspect_rsquare <- function(lavobject, est.std.all=NULL, +lav_object_inspect_rsquare <- function(object, est.std.all=NULL, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups OUT <- vector("list", length=G) if(is.null(est.std.all)) { - est.std.all <- standardize.est.all(lavobject) + est.std.all <- standardize.est.all(object) } - partable <- lavobject@ParTable + partable <- object@ParTable partable$rsquare <- 1.0 - est.std.all # no values > 1.0 partable$rsquare[partable$rsquare > 1.0] <- as.numeric(NA) @@ -923,8 +942,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -932,19 +951,19 @@ } # model implied sample stats -lav_object_inspect_implied <- function(lavobject, +lav_object_inspect_implied <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups - ov.names <- lavobject@pta$vnames$ov - ov.names.res <- lavobject@pta$vnames$ov.nox - ov.names.x <- lavobject@pta$vnames$ov.x - lavimplied <- lavobject@implied + G <- object@Data@ngroups + ov.names <- object@pta$vnames$ov + ov.names.res <- object@pta$vnames$ov.nox + ov.names.x <- object@pta$vnames$ov.x + lavimplied <- object@implied OUT <- vector("list", length=G) for(g in 1:G) { - if(!lavobject@Model@conditional.x) { + if(!object@Model@conditional.x) { # covariance matrix OUT[[g]]$cov <- lavimplied$cov[[g]] @@ -966,14 +985,14 @@ } # thresholds - if(lavobject@Model@categorical) { + if(object@Model@categorical) { OUT[[g]]$th <- as.numeric(lavimplied$th[[g]]) - if(length(lavobject@Model@num.idx[[g]]) > 0L) { - NUM.idx <- which(lavobject@Model@th.idx[[g]] == 0) + if(length(object@Model@num.idx[[g]]) > 0L) { + NUM.idx <- 
which(object@Model@th.idx[[g]] == 0) OUT[[g]]$th <- OUT[[g]]$th[ -NUM.idx ] } if(add.labels) { - names(OUT[[g]]$th) <- lavobject@pta$vnames$th[[g]] + names(OUT[[g]]$th) <- object@pta$vnames$th[[g]] } if(add.class) { class(OUT[[g]]$th) <- c("lavaan.vector", "numeric") @@ -995,7 +1014,7 @@ } # intercepts - if(lavobject@Model@conditional.x) { + if(object@Model@conditional.x) { OUT[[g]]$res.int <- as.numeric(lavimplied$res.int[[g]]) if(add.labels) { names(OUT[[g]]$res.int) <- ov.names.res[[g]] @@ -1006,14 +1025,14 @@ } # thresholds - if(lavobject@Model@categorical) { + if(object@Model@categorical) { OUT[[g]]$res.th <- as.numeric(lavimplied$res.th[[g]]) - if(length(lavobject@Model@num.idx[[g]]) > 0L) { - NUM.idx <- which(lavobject@Model@th.idx[[g]] == 0) + if(length(object@Model@num.idx[[g]]) > 0L) { + NUM.idx <- which(object@Model@th.idx[[g]] == 0) OUT[[g]]$res.th <- OUT[[g]]$res.th[ -NUM.idx ] } if(add.labels) { - names(OUT[[g]]$res.th) <- lavobject@pta$vnames$th[[g]] + names(OUT[[g]]$res.th) <- object@pta$vnames$th[[g]] } if(add.class) { class(OUT[[g]]$res.th) <- c("lavaan.vector", "numeric") @@ -1021,7 +1040,7 @@ } # slopes - if(lavobject@Model@nexo > 0L) { + if(object@Model@nexo > 0L) { OUT[[g]]$res.slopes <- lavimplied$res.slopes[[g]] if(add.labels) { rownames(OUT[[g]]$res.slopes) <- ov.names.res[[g]] @@ -1033,8 +1052,8 @@ } # cov.x - if(lavobject@Model@nexo > 0L) { - OUT[[g]]$cov.x <- lavobject@SampleStats@cov.x[[g]] + if(object@Model@nexo > 0L) { + OUT[[g]]$cov.x <- object@SampleStats@cov.x[[g]] if(add.labels) { rownames(OUT[[g]]$cov.x) <- ov.names.x[[g]] colnames(OUT[[g]]$cov.x) <- ov.names.x[[g]] @@ -1048,7 +1067,7 @@ } # conditional.x # stochastic weights - if(lavobject@Model@group.w.free) { + if(object@Model@group.w.free) { OUT[[g]]$group.w <- lavimplied$group.w[[g]] if(add.labels) { names(OUT[[g]]$group.w) <- "w" # somewhat redundant @@ -1062,8 +1081,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1072,15 +1091,15 @@ # residuals: _inspect_sampstat - _inspect_implied -lav_object_inspect_residuals <- function(lavobject, h1 = TRUE, +lav_object_inspect_residuals <- function(object, h1 = TRUE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # unstandardized residuals - obsList <- lav_object_inspect_sampstat(lavobject, h1 = h1, + obsList <- lav_object_inspect_sampstat(object, h1 = h1, add.labels = add.labels, add.class = FALSE, drop.list.single.group = FALSE) - estList <- lav_object_inspect_implied(lavobject, + estList <- lav_object_inspect_implied(object, add.labels = add.labels, add.class = FALSE, drop.list.single.group = FALSE) @@ -1089,7 +1108,7 @@ resList <- vector("list", length = ngroups) for(g in 1:ngroups) { - if(lavobject@Model@conditional.x) { + if(object@Model@conditional.x) { if(!is.null(estList[[g]]$res.cov)) { resList[[g]]$res.cov <- ( obsList[[g]]$res.cov - estList[[g]]$res.cov ) @@ -1174,8 +1193,8 @@ if(ngroups == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1183,14 +1202,14 @@ } -lav_object_inspect_cov_lv <- function(lavobject, correlation.metric = FALSE, +lav_object_inspect_cov_lv <- function(object, correlation.metric 
= FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # compute lv covar - OUT <- computeVETA(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, + OUT <- computeVETA(lavmodel = object@Model, + lavsamplestats = object@SampleStats, remove.dummy.lv = TRUE) # cor + labels + class @@ -1203,7 +1222,7 @@ if(add.labels) { colnames(OUT[[g]]) <- rownames(OUT[[g]]) <- - lavobject@pta$vnames$lv[[g]] + object@pta$vnames$lv[[g]] } if(add.class) { @@ -1214,29 +1233,29 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_mean_lv <- function(lavobject, +lav_object_inspect_mean_lv <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # compute lv means - OUT <- computeEETA(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, + OUT <- computeEETA(lavmodel = object@Model, + lavsamplestats = object@SampleStats, remove.dummy.lv = TRUE) OUT <- lapply(OUT, as.numeric) # labels + class for(g in 1:G) { if(add.labels && length(OUT[[g]]) > 0L) { - names(OUT[[g]]) <- lavobject@pta$vnames$lv.regular[[g]] + names(OUT[[g]]) <- object@pta$vnames$lv.regular[[g]] } if(add.class) { class(OUT[[g]]) <- c("lavaan.vector", "numeric") @@ -1246,22 +1265,22 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_cov_all <- function(lavobject, correlation.metric = FALSE, +lav_object_inspect_cov_all <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # compute extended model implied covariance matrix (both ov and lv) - OUT <- computeCOV(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, + OUT <- computeCOV(lavmodel = object@Model, + lavsamplestats = object@SampleStats, remove.dummy.lv = TRUE) # cor + labels + class @@ -1273,8 +1292,8 @@ } if(add.labels) { - NAMES <- c(lavobject@pta$vnames$ov.model[[g]], - lavobject@pta$vnames$lv.regular[[g]]) + NAMES <- c(object@pta$vnames$ov.model[[g]], + object@pta$vnames$lv.regular[[g]]) colnames(OUT[[g]]) <- rownames(OUT[[g]]) <- NAMES } if(add.class) { @@ -1285,8 +1304,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1294,17 +1313,17 @@ } -lav_object_inspect_cov_ov <- function(lavobject, correlation.metric = FALSE, +lav_object_inspect_cov_ov <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # get model-implied covariance matrix observed - if(lavobject@Model@conditional.x) { - OUT <- lavobject@implied$res.cov + if(object@Model@conditional.x) { + OUT <- object@implied$res.cov } else { - OUT <- lavobject@implied$cov + OUT <- 
object@implied$cov } # cor + labels + class @@ -1317,7 +1336,7 @@ if(add.labels) { colnames(OUT[[g]]) <- rownames(OUT[[g]]) <- - lavobject@pta$vnames$ov.model[[g]] + object@pta$vnames$ov.model[[g]] } if(add.class) { class(OUT[[g]]) <- c("lavaan.matrix.symmetric", "matrix") @@ -1327,24 +1346,24 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_mean_ov <- function(lavobject, +lav_object_inspect_mean_ov <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # compute lv means - if(lavobject@Model@conditional.x) { - OUT <- lavobject@implied$res.int + if(object@Model@conditional.x) { + OUT <- object@implied$res.int } else { - OUT <- lavobject@implied$mean + OUT <- object@implied$mean } # make numeric @@ -1353,7 +1372,7 @@ # labels + class for(g in 1:G) { if(add.labels && length(OUT[[g]]) > 0L) { - names(OUT[[g]]) <- lavobject@pta$vnames$ov.model[[g]] + names(OUT[[g]]) <- object@pta$vnames$ov.model[[g]] } if(add.class) { class(OUT[[g]]) <- c("lavaan.vector", "numeric") @@ -1363,24 +1382,24 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_th <- function(lavobject, +lav_object_inspect_th <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # thresholds - if(lavobject@Model@conditional.x) { - OUT <- lavobject@implied$res.th + if(object@Model@conditional.x) { + OUT <- object@implied$res.th } else { - OUT <- lavobject@implied$th + OUT <- object@implied$th } # make numeric @@ -1388,12 +1407,12 @@ # labels + class for(g in 1:G) { - if(length(lavobject@Model@num.idx[[g]]) > 0L) { - NUM.idx <- which(lavobject@Model@th.idx[[g]] == 0) + if(length(object@Model@num.idx[[g]]) > 0L) { + NUM.idx <- which(object@Model@th.idx[[g]] == 0) OUT[[g]] <- OUT[[g]][ -NUM.idx ] } if(add.labels && length(OUT[[g]]) > 0L) { - names(OUT[[g]]) <- lavobject@pta$vnames$th[[g]] + names(OUT[[g]]) <- object@pta$vnames$th[[g]] } if(add.class) { class(OUT[[g]]) <- c("lavaan.vector", "numeric") @@ -1403,36 +1422,36 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_vy <- function(lavobject, +lav_object_inspect_vy <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # 'unconditional' model-implied variances # - same as diag(Sigma.hat) if all Y are continuous) # - 1.0 (or delta^2) if categorical # - if also Gamma, cov.x is used (only if categorical) - OUT <- computeVY(lavmodel = lavobject@Model, GLIST = NULL, - lavsamplestats = lavobject@SampleStats, + OUT <- computeVY(lavmodel = object@Model, GLIST = NULL, + lavsamplestats = object@SampleStats, diagonal.only = TRUE) # labels + class for(g in 1:G) { if(add.labels && length(OUT[[g]]) > 0L) { - 
if(lavobject@Model@categorical) { - names(OUT[[g]]) <- lavobject@pta$vnames$ov.nox[[g]] + if(object@Model@categorical) { + names(OUT[[g]]) <- object@pta$vnames$ov.nox[[g]] } else { - names(OUT[[g]]) <- lavobject@pta$vnames$ov[[g]] + names(OUT[[g]]) <- object@pta$vnames$ov[[g]] } } if(add.class) { @@ -1443,8 +1462,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1452,13 +1471,13 @@ } -lav_object_inspect_theta <- function(lavobject, correlation.metric = FALSE, +lav_object_inspect_theta <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # get residual covariances - OUT <- computeTHETA(lavmodel = lavobject@Model) + OUT <- computeTHETA(lavmodel = object@Model) # labels + class for(g in 1:G) { @@ -1469,7 +1488,7 @@ if(add.labels && length(OUT[[g]]) > 0L) { colnames(OUT[[g]]) <- rownames(OUT[[g]]) <- - lavobject@pta$vnames$ov.model[[g]] + object@pta$vnames$ov.model[[g]] } if(add.class) { @@ -1480,8 +1499,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1489,25 +1508,25 @@ } -lav_object_inspect_missing_coverage <- function(lavobject, +lav_object_inspect_missing_coverage <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # get missing coverage OUT <- vector("list", G) for(g in 1:G) { - if(!is.null(lavobject@Data@Mp[[g]])) { - OUT[[g]] <- lavobject@Data@Mp[[g]]$coverage + if(!is.null(object@Data@Mp[[g]])) { + OUT[[g]] <- object@Data@Mp[[g]]$coverage } else { - nvar <- length(lavobject@Data@ov.names[[g]]) + nvar <- length(object@Data@ov.names[[g]]) OUT[[g]] <- matrix(1.0, nvar, nvar) } if(add.labels && length(OUT[[g]]) > 0L) { colnames(OUT[[g]]) <- rownames(OUT[[g]]) <- - lavobject@pta$vnames$ov.model[[g]] + object@pta$vnames$ov.model[[g]] } if(add.class) { @@ -1518,33 +1537,33 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_missing_patterns <- function(lavobject, +lav_object_inspect_missing_patterns <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # get missing patterns OUT <- vector("list", G) for(g in 1:G) { - if(!is.null(lavobject@Data@Mp[[g]])) { - OUT[[g]] <- lavobject@Data@Mp[[g]]$pat + if(!is.null(object@Data@Mp[[g]])) { + OUT[[g]] <- object@Data@Mp[[g]]$pat } else { - nvar <- length(lavobject@Data@ov.names[[g]]) + nvar <- length(object@Data@ov.names[[g]]) OUT[[g]] <- matrix(TRUE, 1L, nvar) - rownames(OUT[[g]]) <- lavobject@Data@nobs[[g]] + rownames(OUT[[g]]) <- object@Data@nobs[[g]] } if(add.labels && length(OUT[[g]]) > 0L) { - colnames(OUT[[g]]) <- lavobject@pta$vnames$ov.model[[g]] + colnames(OUT[[g]]) <- object@pta$vnames$ov.model[[g]] } if(add.class) { @@ -1555,25 +1574,25 @@ if(G == 1L &&
drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_empty_idx <- function(lavobject, +lav_object_inspect_empty_idx <- function(object, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups + G <- object@Data@ngroups # get empty idx OUT <- vector("list", G) for(g in 1:G) { - if(!is.null(lavobject@Data@Mp[[g]])) { - OUT[[g]] <- lavobject@Data@Mp[[g]]$empty.idx + if(!is.null(object@Data@Mp[[g]])) { + OUT[[g]] <- object@Data@Mp[[g]]$empty.idx } else { OUT[[g]] <- integer(0L) } @@ -1582,8 +1601,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1591,12 +1610,12 @@ } -lav_object_inspect_wls_est <- function(lavobject, +lav_object_inspect_wls_est <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups - OUT <- lav_model_wls_est(lavobject@Model) #, - #cov.x = lavobject@SampleStats@cov.x) + G <- object@Data@ngroups + OUT <- lav_model_wls_est(object@Model) #, + #cov.x = object@SampleStats@cov.x) for(g in 1:G) { if(add.labels && length(OUT[[g]]) > 0L) { @@ -1612,19 +1631,19 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_wls_obs <- function(lavobject, +lav_object_inspect_wls_obs <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - G <- lavobject@Data@ngroups - OUT <- lavobject@SampleStats@WLS.obs + G <- object@Data@ngroups + OUT <- object@SampleStats@WLS.obs for(g in 1:G) { if(add.labels && length(OUT[[g]]) > 0L) { @@ -1640,28 +1659,28 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } -lav_object_inspect_wls_v <- function(lavobject, +lav_object_inspect_wls_v <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # shortcuts - G <- lavobject@Data@ngroups + G <- object@Data@ngroups - OUT <- lav_model_wls_v(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, - estimator = lavobject@Options$estimator, - lavdata = lavobject@Data) + OUT <- lav_model_wls_v(lavmodel = object@Model, + lavsamplestats = object@SampleStats, + structured = TRUE, + lavdata = object@Data) # if estimator == "DWLS" or "ULS", we only stored the diagonal # hence, we create a full matrix here - if(lavobject@Options$estimator %in% c("DWLS", "ULS")) { + if(object@Options$estimator %in% c("DWLS", "ULS")) { OUT <- lapply(OUT, function(x) { nr = NROW(x); diag(x, nrow=nr, ncol=nr) }) } @@ -1681,8 +1700,8 @@ if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1690,23 +1709,23 @@ 
} -lav_object_inspect_sampstat_gamma <- function(lavobject, +lav_object_inspect_sampstat_gamma <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # shortcuts - G <- lavobject@Data@ngroups + G <- object@Data@ngroups - if(!is.null(lavobject@SampleStats@NACOV[[1]])) { - OUT <- lavobject@SampleStats@NACOV + if(!is.null(object@SampleStats@NACOV[[1]])) { + OUT <- object@SampleStats@NACOV } else { - OUT <- lavGamma(lavobject) + OUT <- lavGamma(object) } if(G == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } @@ -1714,29 +1733,28 @@ } -lav_object_inspect_gradient <- function(lavobject, +lav_object_inspect_gradient <- function(object, add.labels = FALSE, add.class = FALSE) { - if(lavobject@SampleStats@missing.flag || - lavobject@Options$estimator == "PML") { + if(object@SampleStats@missing.flag || + object@Options$estimator == "PML") { group.weight <- FALSE } else { group.weight <- TRUE } - OUT <- lav_model_gradient(lavmodel = lavobject@Model, + OUT <- lav_model_gradient(lavmodel = object@Model, GLIST = NULL, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - lavcache = lavobject@Cache, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, type = "free", - estimator = lavobject@Options$estimator, verbose = FALSE, group.weight = group.weight) # labels if(add.labels) { - names(OUT) <- lav_partable_labels(lavobject@ParTable, type="free") + names(OUT) <- lav_partable_labels(object@ParTable, type="free") } # class @@ -1747,20 +1765,19 @@ OUT } -lav_object_inspect_hessian <- function(lavobject, +lav_object_inspect_hessian <- function(object, add.labels = FALSE, add.class = FALSE) { - OUT <- lav_model_hessian(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - lavcache = lavobject@Cache, - estimator = lavobject@Options$estimator, + OUT <- lav_model_hessian(lavmodel = object@Model, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, group.weight = TRUE) # labels if(add.labels) { colnames(OUT) <- rownames(OUT) <- - lav_partable_labels(lavobject@ParTable, type="free") + lav_partable_labels(object@ParTable, type="free") } # class @@ -1771,29 +1788,27 @@ OUT } -lav_object_inspect_information <- function(lavobject, +lav_object_inspect_information <- function(object, information = "default", augmented = FALSE, inverted = FALSE, add.labels = FALSE, add.class = FALSE) { if(information == "default") { - information <- lavobject@Options$information + information <- object@Options$information } if(information == "expected" || information == "observed") { - OUT <- lav_model_information(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - estimator = lavobject@Options$estimator, - lavcache = lavobject@Cache, + OUT <- lav_model_information(lavmodel = object@Model, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, information = information, augmented = augmented, inverted = inverted) } else if(information == "first.order") { - B0 <- lav_model_information_firstorder(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - estimator = lavobject@Options$estimator, - lavcache = lavobject@Cache, + B0 <- 
lav_model_information_firstorder(lavmodel = object@Model, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, check.pd = FALSE, augmented = augmented, inverted = inverted) @@ -1803,7 +1818,7 @@ # labels if(add.labels) { - NAMES <- lav_partable_labels(lavobject@ParTable, type="free") + NAMES <- lav_partable_labels(object@ParTable, type="free") if(augmented) { nExtra <- nrow(OUT) - length(NAMES) if(nExtra > 0L) { @@ -1822,14 +1837,13 @@ } # only to provide a direct function to the old 'getVariability()' function -lav_object_inspect_firstorder <- function(lavobject, +lav_object_inspect_firstorder <- function(object, add.labels = FALSE, add.class = FALSE) { - B0 <- lav_model_information_firstorder(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, - lavdata = lavobject@Data, - estimator = lavobject@Options$estimator, - lavcache = lavobject@Cache, + B0 <- lav_model_information_firstorder(lavmodel = object@Model, + lavsamplestats = object@SampleStats, + lavdata = object@Data, + lavcache = object@Cache, check.pd = FALSE, augmented = FALSE, inverted = FALSE) @@ -1839,7 +1853,7 @@ # labels if(add.labels) { colnames(OUT) <- rownames(OUT) <- - lav_partable_labels(lavobject@ParTable, type="free") + lav_partable_labels(object@ParTable, type="free") } # class @@ -1850,25 +1864,25 @@ OUT } -lav_object_inspect_vcov <- function(lavobject, standardized = FALSE, +lav_object_inspect_vcov <- function(object, standardized = FALSE, type = "std.all", free.only = TRUE, add.labels = FALSE, add.class = FALSE, remove.duplicated = FALSE) { - npar <- max(lavobject@ParTable$free) - if(lavobject@optim$npar == 0) { + npar <- max(object@ParTable$free) + if(object@optim$npar == 0) { OUT <- matrix(0,0,0) } else { # check if we already have it - tmp <- try(slot(lavobject, "vcov"), silent = TRUE) - if(!inherits(tmp, "try-error") && !is.null(lavobject@vcov$vcov)) { - OUT <- lavobject@vcov$vcov + tmp <- try(slot(object, "vcov"), silent = TRUE) + if(!inherits(tmp, "try-error") && !is.null(object@vcov$vcov)) { + OUT <- object@vcov$vcov } else { # compute it again - OUT <- lav_model_vcov(lavmodel = lavobject@Model, - lavsamplestats = lavobject@SampleStats, - lavoptions = lavobject@Options, - lavdata = lavobject@Data, - lavcache = lavobject@Cache + OUT <- lav_model_vcov(lavmodel = object@Model, + lavsamplestats = object@SampleStats, + lavoptions = object@Options, + lavdata = object@Data, + lavcache = object@Cache ) } } @@ -1886,32 +1900,32 @@ if(standardized) { if(type == "std.lv") { JAC <- try(lav_func_jacobian_complex(func = standardize.est.lv.x, - x = lavobject@optim$x, lavobject = lavobject), silent = TRUE) + x = object@optim$x, lavobject = object), silent = TRUE) if(inherits(JAC, "try-error")) { # eg. pnorm() JAC <- lav_func_jacobian_simple(func = standardize.est.lv.x, - x = lavobject@optim$x, lavobject=lavobject) + x = object@optim$x, lavobject = object) } } else if(type == "std.all") { JAC <- try(lav_func_jacobian_complex(func = standardize.est.all.x, - x = lavobject@optim$x, lavobject = lavobject), silent = TRUE) + x = object@optim$x, object = object), silent = TRUE) if(inherits(JAC, "try-error")) { # eg. 
pnorm() JAC <- lav_func_jacobian_simple(func = standardize.est.all.x, - x = lavobject@optim$x, lavobject=lavobject) + x = object@optim$x, lavobject = object) } } else if(type == "std.nox") { JAC <- try(lav_func_jacobian_complex(func = standardize.est.all.nox.x, - x = lavobject@optim$x, lavobject = lavobject), silent = TRUE) + x = object@optim$x, lavobject = object), silent = TRUE) if(inherits(JAC, "try-error")) { # eg. pnorm() JAC <- lav_func_jacobian_simple(func = standardize.est.all.nox.x, - x = lavobject@optim$x, lavobject=lavobject) + x = object@optim$x, lavobject = object) } } # JAC contains *all* parameters in the parameter table if(free.only) { - free.idx <- which(lavobject@ParTable$free > 0L) + free.idx <- which(object@ParTable$free > 0L) JAC <- JAC[free.idx,, drop = FALSE] } OUT <- JAC %*% OUT %*% t(JAC) @@ -1920,14 +1934,14 @@ # labels if(add.labels) { colnames(OUT) <- rownames(OUT) <- - lav_partable_labels(lavobject@ParTable, type="free") + lav_partable_labels(object@ParTable, type="free") } # alias? - if(remove.duplicated && lavobject@Model@eq.constraints) { - simple.flag <- lav_constraints_check_simple(lavobject@Model) + if(remove.duplicated && object@Model@eq.constraints) { + simple.flag <- lav_constraints_check_simple(object@Model) if(simple.flag) { - LAB <- lav_partable_labels(lavobject@ParTable, type="free") + LAB <- lav_partable_labels(object@ParTable, type="free") dup.flag <- duplicated(LAB) OUT <- OUT[!dup.flag, !dup.flag, drop = FALSE] } else { @@ -1943,11 +1957,11 @@ OUT } -lav_object_inspect_vcov_def <- function(lavobject, standardized = FALSE, +lav_object_inspect_vcov_def <- function(object, standardized = FALSE, type = "std.all", add.labels = FALSE, add.class = FALSE) { - lavmodel <- lavobject@Model - lavpartable <- lavobject@ParTable + lavmodel <- object@Model + lavpartable <- object@ParTable def.idx <- which(lavpartable$op == ":=") if(length(def.idx) == 0L) { @@ -1956,7 +1970,7 @@ if(standardized) { # compute VCOV for "free" parameters only - VCOV <- lav_object_inspect_vcov(lavobject = lavobject, + VCOV <- lav_object_inspect_vcov(object, standardized = TRUE, type = type, free.only = FALSE, add.labels = FALSE, add.class = FALSE) @@ -1967,8 +1981,8 @@ x <- lav_model_get_parameters(lavmodel, type = "free") # bootstrap or not? 
- if(!is.null(lavobject@boot$coef)) { - BOOT <- lavobject@boot$coef + if(!is.null(object@boot$coef)) { + BOOT <- object@boot$coef BOOT.def <- apply(BOOT, 1L, lavmodel@def.function) if(length(def.idx) == 1L) { BOOT.def <- as.matrix(BOOT.def) @@ -1978,7 +1992,7 @@ OUT <- cov(BOOT.def) } else { # VCOV - VCOV <- lav_object_inspect_vcov(lavobject = lavobject, + VCOV <- lav_object_inspect_vcov(object, standardized = FALSE, type = type, free.only = TRUE, add.labels = FALSE, @@ -2009,10 +2023,10 @@ OUT } -lav_object_inspect_UGamma <- function(lavobject, +lav_object_inspect_UGamma <- function(object, add.labels = FALSE, add.class = FALSE) { - out <- lav_test_satorra_bentler(lavobject = lavobject, + out <- lav_test_satorra_bentler(lavobject = object, return.ugamma = TRUE) OUT <- out$UGamma @@ -2030,13 +2044,13 @@ } # Delta (jacobian: d samplestats / d free_parameters) -lav_object_inspect_delta <- function(lavobject, +lav_object_inspect_delta <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { - OUT <- computeDelta(lavobject@Model) + OUT <- computeDelta(object@Model) # labels - lavmodel <- lavobject@Model + lavmodel <- object@Model categorical <- lavmodel@categorical conditional.x <- lavmodel@conditional.x group.w.free <- lavmodel@group.w.free @@ -2044,20 +2058,20 @@ num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx nexo <- lavmodel@nexo - ngroups <- lavmodel@ngroups + nblocks <- lavmodel@nblocks if(add.labels) { - PNAMES <- lav_partable_labels(lavobject@ParTable, type="free") + PNAMES <- lav_partable_labels(object@ParTable, type="free") - for(g in 1:ngroups) { + for(g in 1:nblocks) { colnames(OUT[[g]]) <- PNAMES if(conditional.x) { - ov.names <- lavobject@pta$vnames$ov.nox[[g]] + ov.names <- object@pta$vnames$ov.nox[[g]] } else { - ov.names <- lavobject@pta$vnames$ov[[g]] + ov.names <- object@pta$vnames$ov[[g]] } - ov.names.x <- lavobject@pta$vnames$ov.x[[g]] + ov.names.x <- object@pta$vnames$ov.x[[g]] nvar <- length(ov.names) @@ -2090,7 +2104,7 @@ # th if(categorical) { - names.th <- lavobject@pta$vnames$th[[g]] + names.th <- object@pta$vnames$th[[g]] # interweave numeric intercepts, if any if(length(num.idx[[g]]) > 0L) { tmp <- character( length(th.idx[[g]]) ) @@ -2119,15 +2133,48 @@ } # g } # labels - if(ngroups == 1L && drop.list.single.group) { + if(nblocks == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { - if(length(lavobject@Data@group.label) > 0L) { - names(OUT) <- unlist(lavobject@Data@group.label) + if(length(object@Data@group.label) > 0L) { + names(OUT) <- unlist(object@Data@group.label) } } OUT } +lav_object_inspect_zero_cell_tables <- function(object, + add.labels = FALSE, add.class = FALSE, + drop.list.single.group = FALSE) { + + # categorical? 
+ if(!object@Model@categorical) { + warning("lavaan WARNING: no categorical variables in fitted model") + return(invisible(list())) + } + lavdata <- object@Data + + # create 2-way tables + TABLE <- lavTables(object, dimension = 2L, output = "data.frame", + statistic = NULL) + + # select tables with empty cells + empty.id <- TABLE$id[which(TABLE$obs.freq == 0)] + + + if(length(empty.id) == 0L) { + # only when lavInspect() is used, give message + if(add.class) { + cat("(There are no tables with empty cells for this fitted model)\n") + } + return(invisible(list())) + } else { + OUT <- lav_tables_cells_format(TABLE[TABLE$id %in% empty.id,], + lavdata = lavdata, + drop.list.single.group = drop.list.single.group) + } + + OUT +} diff -Nru r-cran-lavaan-0.5.22/R/lav_objective.R r-cran-lavaan-0.5.23.1097/R/lav_objective.R --- r-cran-lavaan-0.5.22/R/lav_objective.R 2016-09-24 12:24:05.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_objective.R 2016-12-27 19:59:28.000000000 +0000 @@ -23,7 +23,7 @@ } # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 fx } @@ -56,7 +56,7 @@ fx <- objective.sigma + objective.beta # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 fx } @@ -96,7 +96,7 @@ fx <- fx + ( 1/nobs * (reml.h0 - reml.h1) ) # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 fx } @@ -119,7 +119,7 @@ } # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 fx } @@ -136,7 +136,7 @@ fx <- as.numeric( crossprod(crossprod(WLS.V, diff), diff) ) # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 fx } @@ -148,53 +148,30 @@ fx <- sum(diff * diff * WLS.VD) # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # Full Information ML estimator (FIML) handling the missing values -estimator.FIML <- function(Sigma.hat=NULL, Mu.hat=NULL, M=NULL, h1=NULL) { +estimator.FIML <- function(Sigma.hat = NULL, Mu.hat = NULL, Yp = NULL, + h1 = NULL, N = NULL) { - npatterns <- length(M) - - fx.p <- numeric(npatterns) - w.p <- numeric(npatterns) - - # for each missing pattern, combine cases and compute raw loglikelihood - for(p in 1:npatterns) { - - SX <- M[[p]][["SY"]] - MX <- M[[p]][["MY"]] - w.p[p] <- nobs <- M[[p]][["freq"]] - var.idx <- M[[p]][["var.idx"]] - - # note: if a decent 'sweep operator' was available (in fortran) - # we might win some time by 'updating' the inverse by sweeping - # out the changed patterns... (but to get the logdet, we need - # to do it one column at a time?) 
- - #cat("FIML: pattern ", p, "\n") - #print(Sigma.hat[var.idx, var.idx]) - - Sigma.inv <- inv.chol(Sigma.hat[var.idx, var.idx], logdet=TRUE) - Sigma.log.det <- attr(Sigma.inv, "logdet") - Mu <- Mu.hat[var.idx] - - TT <- SX + tcrossprod(MX - Mu) - trace <- sum(Sigma.inv * TT) - - fx.p[p] <- Sigma.log.det + trace + if(is.null(N)) { + N <- sum(sapply(Yp, "[[", "freq")) } - fx <- weighted.mean(fx.p, w=w.p) + fx <- lav_mvnorm_missing_loglik_samplestats(Yp = Yp, + Mu = Mu.hat, Sigma = Sigma.hat, + log2pi = FALSE, + minus.two = TRUE)/N # adjust for h1 if(!is.null(h1)) { fx <- fx - h1 # no negative values - if(fx < 0.0) fx <- 0.0 + if(is.finite(fx) && fx < 0.0) fx <- 0.0 } fx @@ -288,7 +265,8 @@ ##Fmin <- sum( prop*log(prop/pairwisePI) ) Fmin <- sum( freq * log(prop/pairwisePI) ) # to avoid 'N' - if(missing == "available.cases" || missing =="doubly.robust.PML") { + if(missing == "available.cases" || missing == "doubly.robust") { + uniPI <- univariateExpProbVec(TH = TH, th.idx = th.idx) # shortcuts @@ -312,110 +290,121 @@ Fmin <- Fmin + sum(uniweights * log(uniprop/uniPI)) } - if (missing =="doubly.robust.PML") { - - # COMPUTE THE SUM OF THE EXPECTED BIVARIATE CONDITIONAL LIKELIHOODS - #SUM_{i,j} [ E_{Yi,Yj|y^o}}(lnf(Yi,Yj))) ] + if (missing =="doubly.robust") { - #First compute the terms of the summand. Since the cells of - # pairwiseProbGivObs are zero for the pairs of variables that at least - #one of the variables is observed (hence not contributing to the summand) - #there is no need to construct an index vector for summing appropriately - #within each individual. - log_pairwisePI_orig <- log(pairwisePI_orig) - pairwiseProbGivObs <- lavcache$pairwiseProbGivObs - tmp_prod <- t(t(pairwiseProbGivObs)*log_pairwisePI_orig) - - SumElnfijCasewise <- apply(tmp_prod, 1, sum) - SumElnfij <- sum(SumElnfijCasewise) - logl <- logl + SumElnfij - - # COMPUTE THE THE SUM OF THE EXPECTED UNIVARIATE CONDITIONAL LIKELIHOODS - # SUM_{i,j} [ E_{Yj|y^o}}(lnf(Yj|yi))) ] - - #First compute the model-implied conditional univariate probabilities - # p(y_i=a|y_j=b). Let ModProbY1Gy2 be the vector of these - # probabilities. The order the probabilities -are listed in the vector ModProbY1Gy2 is as follows: - # y1|y2, y1|y3, ..., y1|yp, y2|y1, y2|y3, ..., y2|yp, - # ..., yp|y1, yp|y2, ..., yp|y(p-1). Within each pair of variables the -index "a" which represents the response category of variable yi runs faster than -"b" which represents the response category of the given variable yj. -The computation of these probabilities are based on the model-implied -bivariate probabilities p(y_i=a,y_j=b). To do the appropriate summations -and divisions we need some index vectors to keep track of the index i, j, -a, and b, as well as the pair index. These index vectors should be -computed once and stored in lavcache. About where in the lavaan code -we will add the computations and how they will be done please see the -file "new objects in lavcache for DR-PL.r" - - idx.pairs <- lavcache$idx.pairs - idx.cat.y2.split <- lavcache$idx.cat.y2.split - idx.cat.y1.split <- lavcache$idx.cat.y1.split - idx.Y1 <- lavcache$idx.Y1 - idx.Gy2 <- lavcache$idx.Gy2 - idx.cat.Y1 <- lavcache$idx.cat.Y1 - idx.cat.Gy2 <- lavcache$idx.cat.Gy2 - id.uniPrGivObs <- lavcache$id.uniPrGivObs - #the latter keeps track which variable each column of the matrix - #univariateProbGivObs refers to - - #For the function compute_uniCondProb_based_on_bivProb see the .r file - #with the same name.
- compute_uniCondProb_based_on_bivProb <- function() { - stop("not ready yet!") - } - ModProbY1Gy2 <- compute_uniCondProb_based_on_bivProb( - bivProb = pairwisePI_orig, - nvar = nvar, - idx.pairs = idx.pairs, - idx.Y1 = idx.Y1, - idx.Gy2 = idx.Gy2, - idx.cat.y1.split = idx.cat.y1.split, - idx.cat.y2.split = idx.cat.y2.split) - - log_ModProbY1Gy2 <- log(ModProbY1Gy2) - - #Let univariateProbGivObs be the matrix of the conditional univariate - # probabilities Pr(y_i=a|y^o) that has been computed in advance and are - #fed to the DR-PL function. The rows represent different individuals, - #i.e. nrow=nobs, and the columns different probabilities. The columns - # are listed as follows: a runs faster than i. - - #Note that the number of columns of univariateProbGivObs is not the - #same with the length(log_ModProbY1Gy2), actually - #ncol(univariateProbGivObs) < length(log_ModProbY1Gy2). - #For this we use the following commands in order to multiply correctly. - - #Compute for each case the product Pr(y_i=a|y^o) * log[ p(y_i=a|y_j=b) ] - #i.e. univariateProbGivObs * log_ModProbY1Gy2 - univariateProbGivObs <- lavcache$univariateProbGivObs - nobs <- nrow(X) - Cond_prod <- matrix(NA, nrow=nobs, ncol=length(ModProbY1Gy2) ) - - for(i in 1:nvar) { - tmp.mat <- univariateProbGivObs[ , id.uniPrGivObs==i] - tmp.vec <- log_ModProbY1Gy2[ idx.Y1==i] - #note that tmp.vec is longer than ncol(tmp.mat). That's why - #we use apply below where the function is done row-wise - #x recycles as we wish to meet the length of tmp.vec - Cond_prod[ , idx.Y1==i] <- - t( apply(tmp.mat, 1, function(x){tmp.vec*x} ) ) - } - - #Since the cells of univariateProbGivObs are zero for the variables - #that are observed (hence not contributing to the summand) - #there is no need to construct an index vector for summing appropriately - #within each individual. - ElnyiGivyjbCasewise <- apply(Cond_prod, 1, sum) - ElnyiGivyjb <- sum(ElnyiGivyjbCasewise) - logl <- logl + ElnyiGivyjb - logl <- logl + ElnyiGivyjb + # COMPUTE THE SUM OF THE EXPECTED BIVARIATE CONDITIONAL LIKELIHOODS + #SUM_{i,j} [ E_{Yi,Yj|y^o}}(lnf(Yi,Yj))) ] - # for the Fmin function - Fmin <- lavcache$FitFunctionConst -logl + #First compute the terms of the summand. Since the cells of + # pairwiseProbGivObs are zero for the pairs of variables that at least + #one of the variables is observed (hence not contributing to the summand) + #there is no need to construct an index vector for summing appropriately + #within each individual. + log_pairwisePI_orig <- log(pairwisePI_orig) + pairwiseProbGivObs <- lavcache$pairwiseProbGivObs + tmp_prod <- t(t(pairwiseProbGivObs)*log_pairwisePI_orig) + + SumElnfijCasewise <- apply(tmp_prod, 1, sum) + SumElnfij <- sum(SumElnfijCasewise) + logl <- logl + SumElnfij + Fmin <- Fmin - SumElnfij + + # COMPUTE THE SUM OF THE EXPECTED UNIVARIATE CONDITIONAL LIKELIHOODS + + # SUM_{i,j} [ E_{Yj|y^o}}(lnf(Yj|yi))) ] + + #First compute the model-implied conditional univariate probabilities + # p(y_i=a|y_j=b). Let ModProbY1Gy2 be the vector of these + # probabilities. The order the probabilities + #are listed in the vector ModProbY1Gy2 is as follows: + # y1|y2, y1|y3, ..., y1|yp, y2|y1, y2|y3, ..., y2|yp, + # ..., yp|y1, yp|y2, ..., yp|y(p-1). Within each pair of variables the + #index "a" which represents the response category of variable yi runs faster than + #"b" which represents the response category of the given variable yj. + #The computation of these probabilities is based on the model-implied + #bivariate probabilities p(y_i=a,y_j=b).
To do the appropriate summations + #and divisions we need some index vectors to keep track of the index i, j, + #a, and b, as well as the pair index. These index vectors should be + #computed once and stored in lavcache. About where in the lavaan code + #we will add the computations and how they will be done please see the + #file "new objects in lavcache for DR-PL.r" + + idx.pairs <- lavcache$idx.pairs + idx.cat.y2.split <- lavcache$idx.cat.y2.split + idx.cat.y1.split <- lavcache$idx.cat.y1.split + idx.Y1 <- lavcache$idx.Y1 + idx.Gy2 <- lavcache$idx.Gy2 + idx.cat.Y1 <- lavcache$idx.cat.Y1 + idx.cat.Gy2 <- lavcache$idx.cat.Gy2 + id.uniPrGivObs <- lavcache$id.uniPrGivObs + #the latter keeps track which variable each column of the matrix + #univariateProbGivObs refers to + + #For the function compute_uniCondProb_based_on_bivProb see the .r file + #with the same name. + ModProbY1Gy2 <- compute_uniCondProb_based_on_bivProb( + bivProb = pairwisePI_orig, + nvar = nvar, + idx.pairs = idx.pairs, + idx.Y1 = idx.Y1, + idx.Gy2 = idx.Gy2, + idx.cat.y1.split = idx.cat.y1.split, + idx.cat.y2.split = idx.cat.y2.split) + + log_ModProbY1Gy2 <- log(ModProbY1Gy2) + + #Let univariateProbGivObs be the matrix of the conditional univariate + # probabilities Pr(y_i=a|y^o) that has been computed in advance and are + #fed to the DR-PL function. The rows represent different individuals, + #i.e. nrow=nobs, and the columns different probabilities. The columns + # are listed as follows: a runs faster than i. + + #Note that the number of columns of univariateProbGivObs is not the + #same with the length(log_ModProbY1Gy2), actually + #ncol(univariateProbGivObs) < length(log_ModProbY1Gy2). + #For this we use the following commands in order to multiply correctly. + + #Compute for each case the product Pr(y_i=a|y^o) * log[ p(y_i=a|y_j=b) ] + #i.e. univariateProbGivObs * log_ModProbY1Gy2 + univariateProbGivObs <- lavcache$univariateProbGivObs + nobs <- nrow(X) + uniweights.casewise <- lavcache$uniweights.casewise + id.cases.with.missing <- which(uniweights.casewise > 0) + no.cases.with.missing <- length(id.cases.with.missing) + no.obs.casewise <- nvar - uniweights.casewise + idx.missing.var <- apply(X, 1, function(x) { + which(is.na(x)) + }) + idx.observed.var <- lapply(idx.missing.var, function(x) { + c(1:nvar)[-x] + }) + idx.cat.observed.var <- sapply(1:nobs, function(i) { + X[i, idx.observed.var[[i]]] + }) + ElnyiGivyjbCasewise <- sapply(1:no.cases.with.missing,function(i) { + tmp.id.case <- id.cases.with.missing[i] + tmp.no.mis <- uniweights.casewise[tmp.id.case] + tmp.idx.mis <- idx.missing.var[[tmp.id.case]] + tmp.idx.obs <- idx.observed.var[[tmp.id.case]] + tmp.no.obs <- no.obs.casewise[tmp.id.case] + tmp.idx.cat.obs <- idx.cat.observed.var[[tmp.id.case]] + tmp.uniProbGivObs.i <- univariateProbGivObs[tmp.id.case, ] + sapply(1:tmp.no.mis, function(k) { + tmp.idx.mis.var <- tmp.idx.mis[k] + tmp.uniProbGivObs.ik <- + tmp.uniProbGivObs.i[id.uniPrGivObs == tmp.idx.mis.var] + tmp.log_ModProbY1Gy2 <- sapply(1:tmp.no.obs, function(z) { + log_ModProbY1Gy2[idx.Y1 == tmp.idx.mis.var & + idx.Gy2 == tmp.idx.obs[z] & + idx.cat.Gy2 == tmp.idx.cat.obs[z]]}) + sum(tmp.log_ModProbY1Gy2 * tmp.uniProbGivObs.ik) + }) + }) + ElnyiGivyjb <- sum(unlist(ElnyiGivyjbCasewise)) + logl <- logl + ElnyiGivyjb + # for the Fmin function + Fmin <- Fmin - ElnyiGivyjb - } #end of if (missing =="doubly.robust.PML") + } #end of if (missing =="doubly.robust") } else { # # order! first i, then j, lav_matrix_vec(table)! 
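As an aside before the lav_object_methods.R changes: the estimator.FIML() rewrite above replaces an explicit loop over missing-data patterns with a single call to lav_mvnorm_missing_loglik_samplestats(). For each pattern p with observed-variable index var.idx, pattern covariance SY, pattern mean MY and frequency freq, the deleted loop computed log|Sigma_p| + tr(Sigma_p^{-1} (SY + (MY - mu_p)(MY - mu_p)')) and averaged these values using the frequencies as weights. A minimal standalone sketch of that computation, assuming the old list-of-patterns layout (fiml_fx is an illustrative name, not lavaan's internal API):

    fiml_fx <- function(Sigma.hat, Mu.hat, M) {
        fx.p <- numeric(length(M))
        w.p  <- numeric(length(M))
        for(p in seq_along(M)) {
            idx     <- M[[p]]$var.idx   # observed variables in pattern p
            w.p[p]  <- M[[p]]$freq      # number of cases showing pattern p
            Sp      <- Sigma.hat[idx, idx, drop = FALSE]
            Sp.inv  <- solve(Sp)
            logdet  <- as.numeric(determinant(Sp, logarithm = TRUE)$modulus)
            # TT = SY + (MY - mu_p)(MY - mu_p)'; for symmetric matrices,
            # sum(A * B) equals tr(A %*% B)
            TT      <- M[[p]]$SY + tcrossprod(M[[p]]$MY - Mu.hat[idx])
            fx.p[p] <- logdet + sum(Sp.inv * TT)
        }
        weighted.mean(fx.p, w = w.p)    # fx, before the h1 adjustment
    }

The rewritten function obtains the same quantity as the minus-two log-likelihood divided by N (log2pi = FALSE, minus.two = TRUE), then subtracts h1 and clips negative finite values, as shown in the hunk above.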
diff -Nru r-cran-lavaan-0.5.22/R/lav_object_methods.R r-cran-lavaan-0.5.23.1097/R/lav_object_methods.R --- r-cran-lavaan-0.5.22/R/lav_object_methods.R 2016-09-24 13:02:01.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_object_methods.R 2017-02-24 12:55:31.000000000 +0000 @@ -5,10 +5,8 @@ # catch FAKE run FAKE <- FALSE - if(!is.null(object@Model@control$optim.method)) { - if(tolower(object@Model@control$optim.method) == "none") { - FAKE <- TRUE - } + if(object@Options$optim.method == "none") { + FAKE <- TRUE } # Convergence or not? @@ -327,393 +325,6 @@ } }) -# old summary (<0.5-19) -summary2 <- function(object, estimates=TRUE, fit.measures=FALSE, - standardized=FALSE, - rsquare=FALSE, std.nox=FALSE, modindices=FALSE) { - - if(std.nox) standardized <- TRUE - - # always print the 'short' summary - short.summary(object) - - # only if requested, the fit measures - if(fit.measures) { - if(object@Options$test == "none") { - warning("lavaan WARNING: fit measures not available if test = \"none\"\n\n") - } else if(object@optim$npar > 0L && !object@optim$converged) { - warning("lavaan WARNING: fit measures not available if model did not converge\n\n") - } else { - print.fit.measures( fitMeasures(object, fit.measures="default") ) - } - } - - - if(estimates) { - - # main part: parameter estimates - cat("\nParameter estimates:\n\n") - t0.txt <- sprintf(" %-40s", "Information") - tmp.txt <- object@Options$information - t1.txt <- sprintf(" %10s", paste(toupper(substring(tmp.txt,1,1)), - substring(tmp.txt,2), sep="")) - cat(t0.txt, t1.txt, "\n", sep="") - t0.txt <- sprintf(" %-31s", "Standard Errors") - tmp.txt <- object@Options$se - t1.txt <- sprintf(" %19s", paste(toupper(substring(tmp.txt,1,1)), - substring(tmp.txt,2), sep="")) - cat(t0.txt, t1.txt, "\n", sep="") - if(object@Options$se == "bootstrap") { - t0.txt <- sprintf(" %-40s", "Number of requested bootstrap draws") - t1.txt <- sprintf(" %10i", object@Options$bootstrap) - cat(t0.txt, t1.txt, "\n", sep="") - t0.txt <- sprintf(" %-40s", "Number of successful bootstrap draws") - t1.txt <- sprintf(" %10i", NROW(object@boot$coef)) - cat(t0.txt, t1.txt, "\n", sep="") - } - cat("\n") - - # local print function - print.estimate <- function(name="ERROR", i=1, z.stat=TRUE) { - - # cut name if (still) too long - name <- strtrim(name, width=13L) - - if(!standardized) { - if(is.na(se[i])) { - txt <- sprintf(" %-13s %9.3f %8.3f\n", name, est[i], se[i]) - } else if(se[i] == 0) { - txt <- sprintf(" %-13s %9.3f\n", name, est[i]) - } else if(est[i]/se[i] > 9999.999) { - txt <- sprintf(" %-13s %9.3f %8.3f\n", name, est[i], se[i]) - } else if(!z.stat) { - txt <- sprintf(" %-13s %9.3f %8.3f\n", name, est[i], se[i]) - } else { - z <- est[i]/se[i] - pval <- 2 * (1 - pnorm( abs(z) )) - txt <- sprintf(" %-13s %9.3f %8.3f %8.3f %8.3f\n", - name, est[i], se[i], z, pval) - } - } else { - if(is.na(se[i])) { - txt <- sprintf(" %-13s %9.3f %8.3f %8.3f %8.3f\n", name, est[i], se[i], est.std[i], est.std.all[i]) - } else if(se[i] == 0) { - txt <- sprintf(" %-13s %9.3f %8.3f %8.3f\n", name, est[i], est.std[i], est.std.all[i]) - } else if(est[i]/se[i] > 9999.999) { - txt <- sprintf(" %-13s %9.3f %8.3f %8.3f %8.3f\n", name, est[i], se[i], est.std[i], est.std.all[i]) - } else if(!z.stat) { - txt <- sprintf(" %-13s %9.3f %8.3f %8.3f %8.3f\n", name, est[i], se[i], est.std[i], est.std.all[i]) - } else { - z <- est[i]/se[i] - pval <- 2 * (1 - pnorm( abs(z) )) - txt <- sprintf(" %-13s %9.3f %8.3f %8.3f %8.3f %8.3f %8.3f\n", - name, est[i], se[i], z, pval, est.std[i], est.std.all[i]) - 
} - } - cat(txt) - } - - est <- lav_object_inspect_est(object) - se <- lav_object_inspect_se(object) - if(rsquare || standardized) { - est.std <- standardize.est.lv(object) - if(std.nox) { - est.std.all <- standardize.est.all.nox(object, est.std=est.std) - } else { - est.std.all <- standardize.est.all(object, est.std=est.std) - } - } - - for(g in 1:object@Data@ngroups) { - ov.names <- vnames(object@ParTable, "ov", group=g) - lv.names <- vnames(object@ParTable, "lv", group=g) - - # group header - if(object@Data@ngroups > 1) { - if(g > 1) cat("\n\n") - cat("Group ", g, - " [", object@Data@group.label[[g]], "]:\n\n", sep="") - } - - # estimates header - if(!standardized) { - cat(" Estimate Std.err Z-value P(>|z|)\n") - } else { - if(std.nox) { - cat(" Estimate Std.err Z-value P(>|z|) Std.lv Std.nox\n") - } - else { - cat(" Estimate Std.err Z-value P(>|z|) Std.lv Std.all\n") - } - } - - makeNames <- function(NAMES, LABELS) { - multiB <- FALSE - if(any(nchar(NAMES) != nchar(NAMES, "bytes"))) - multiB <- TRUE - if(any(nchar(LABELS) != nchar(LABELS, "bytes"))) - multiB <- TRUE - # labels? - l.idx <- which(nchar(LABELS) > 0L) - if(length(l.idx) > 0L) { - if(!multiB) { - LABELS <- abbreviate(LABELS, 4) - LABELS[l.idx] <- paste(" (", LABELS[l.idx], ")", sep="") - MAX.L <- max(nchar(LABELS)) - NAMES <- abbreviate(NAMES, minlength = (13 - MAX.L), - strict = TRUE) - } else { - # do not abbreviate anything (eg in multi-byte locales) - MAX.L <- 4L - } - NAMES <- sprintf(paste("%-", (13 - MAX.L), "s%", MAX.L, "s", - sep=""), NAMES, LABELS) - } else { - if(!multiB) { - NAMES <- abbreviate(NAMES, minlength = 13, strict = TRUE) - } else { - NAMES <- sprintf(paste("%-", 13, "s", sep=""), NAMES) - } - } - - NAMES - } - - NAMES <- object@ParTable$rhs - - # 1a. indicators ("=~") (we do show dummy indicators) - mm.idx <- which( object@ParTable$op == "=~" & - !object@ParTable$lhs %in% ov.names & - object@ParTable$group == g) - if(length(mm.idx)) { - cat("Latent variables:\n") - lhs.old <- "" - NAMES[mm.idx] <- makeNames( object@ParTable$rhs[mm.idx], - object@ParTable$label[mm.idx]) - for(i in mm.idx) { - lhs <- object@ParTable$lhs[i] - if(lhs != lhs.old) cat(" ", lhs, " =~\n", sep="") - print.estimate(name=NAMES[i], i) - lhs.old <- lhs - } - cat("\n") - } - - # 1b. formative/composites ("<~") - fm.idx <- which( object@ParTable$op == "<~" & - object@ParTable$group == g) - if(length(fm.idx)) { - cat("Composites:\n") - lhs.old <- "" - NAMES[fm.idx] <- makeNames( object@ParTable$rhs[fm.idx], - object@ParTable$label[fm.idx]) - for(i in fm.idx) { - lhs <- object@ParTable$lhs[i] - if(lhs != lhs.old) cat(" ", lhs, " <~\n", sep="") - print.estimate(name=NAMES[i], i) - lhs.old <- lhs - } - cat("\n") - } - - # 2. regressions - eqs.idx <- which(object@ParTable$op == "~" & object@ParTable$group == g) - if(length(eqs.idx) > 0) { - cat("Regressions:\n") - lhs.old <- "" - NAMES[eqs.idx] <- makeNames( object@ParTable$rhs[eqs.idx], - object@ParTable$label[eqs.idx]) - for(i in eqs.idx) { - lhs <- object@ParTable$lhs[i] - if(lhs != lhs.old) cat(" ", lhs, " ~\n", sep="") - print.estimate(name=NAMES[i], i) - lhs.old <- lhs - } - cat("\n") - } - - # 3. 
covariances - cov.idx <- which(object@ParTable$op == "~~" & - !object@ParTable$exo & - object@ParTable$lhs != object@ParTable$rhs & - object@ParTable$group == g) - if(length(cov.idx) > 0) { - cat("Covariances:\n") - lhs.old <- "" - NAMES[cov.idx] <- makeNames( object@ParTable$rhs[cov.idx], - object@ParTable$label[cov.idx]) - for(i in cov.idx) { - lhs <- object@ParTable$lhs[i] - if(lhs != lhs.old) cat(" ", lhs, " ~~\n", sep="") - print.estimate(name=NAMES[i], i) - lhs.old <- lhs - } - cat("\n") - } - - # 4. intercepts/means - #ord.names <- vnames(object@ParTable, type="ov.ord", group=g) - int.idx <- which(object@ParTable$op == "~1" & - #!object@ParTable$lhs %in% ord.names & - !object@ParTable$exo & - object@ParTable$group == g) - if(length(int.idx) > 0) { - cat("Intercepts:\n") - NAMES[int.idx] <- makeNames( object@ParTable$lhs[int.idx], - object@ParTable$label[int.idx]) - for(i in int.idx) { - print.estimate(name=NAMES[i], i) - } - cat("\n") - } - - # 4b thresholds - th.idx <- which(object@ParTable$op == "|" & - object@ParTable$group == g) - if(length(th.idx) > 0) { - cat("Thresholds:\n") - NAMES[th.idx] <- makeNames( paste(object@ParTable$lhs[th.idx], - "|", - object@ParTable$rhs[th.idx], - sep=""), - object@ParTable$label[th.idx]) - for(i in th.idx) { - print.estimate(name=NAMES[i], i) - } - cat("\n") - } - - # 5. (residual) variances - var.idx <- which(object@ParTable$op == "~~" & - !object@ParTable$exo & - object@ParTable$lhs == object@ParTable$rhs & - object@ParTable$group == g) - if(length(var.idx) > 0) { - cat("Variances:\n") - NAMES[var.idx] <- makeNames( object@ParTable$rhs[var.idx], - object@ParTable$label[var.idx]) - for(i in var.idx) { - if(object@Options$mimic == "lavaan") { - print.estimate(name=NAMES[i], i, z.stat=FALSE) - } else { - print.estimate(name=NAMES[i], i, z.stat=TRUE) - } - } - cat("\n") - } - - # 6. latent response scales - delta.idx <- which(object@ParTable$op == "~*~" & - object@ParTable$group == g) - if(length(delta.idx) > 0) { - cat("Scales y*:\n") - NAMES[delta.idx] <- makeNames( object@ParTable$rhs[delta.idx], - object@ParTable$label[delta.idx]) - for(i in delta.idx) { - print.estimate(name=NAMES[i], i, z.stat=TRUE) - } - cat("\n") - } - - # 7. group weight - group.idx <- which(object@ParTable$lhs == "group" & - object@ParTable$op == "%" & - object@ParTable$group == g) - if(length(group.idx) > 0) { - cat("Group weight:\n") - NAMES[group.idx] <- makeNames( object@ParTable$rhs[group.idx], - object@ParTable$label[group.idx]) - for(i in group.idx) { - print.estimate(name=NAMES[i], i, z.stat=TRUE) - } - cat("\n") - } - - } # ngroups - - # 6. variable definitions - def.idx <- which(object@ParTable$op == ":=") - if(length(def.idx) > 0) { - if(object@Data@ngroups > 1) cat("\n") - cat("Defined parameters:\n") - NAMES[def.idx] <- makeNames( object@ParTable$lhs[def.idx], "") - for(i in def.idx) { - print.estimate(name=NAMES[i], i) - } - cat("\n") - } - - # 7. 
constraints - cin.idx <- which((object@ParTable$op == "<" | - object@ParTable$op == ">")) - ceq.idx <- which(object@ParTable$op == "==" & object@ParTable$user == 1L) - if(length(cin.idx) > 0L || length(ceq.idx) > 0L) { - # set small negative values to zero, to avoid printing " -0.000" - slack <- ifelse(abs(est) < 1e-5, 0, est) - #slack[cin.idx] <- object@Model@cin.function(object@optim$x) - #slack[ceq.idx] <- object@Model@ceq.function(object@optim$x) - - if(object@Data@ngroups > 1 && length(def.idx) == 0L) cat("\n") - cat("Constraints: Slack (>=0)\n") - for(i in c(cin.idx,ceq.idx)) { - lhs <- object@ParTable$lhs[i] - op <- object@ParTable$op[i] - rhs <- object@ParTable$rhs[i] - if(rhs == "0" && op == ">") { - con.string <- paste(lhs, " - 0", sep="") - } else if(rhs == "0" && op == "<") { - con.string <- paste(rhs, " - (", lhs, ")", sep="") - } else if(rhs != "0" && op == ">") { - con.string <- paste(lhs, " - (", rhs, ")", sep="") - } else if(rhs != "0" && op == "<") { - con.string <- paste(rhs, " - (", lhs, ")", sep="") - } else if(rhs == "0" && op == "==") { - con.string <- paste(lhs, " - 0", sep="") - } else if(rhs != "0" && op == "==") { - con.string <- paste(lhs, " - (", rhs, ")", sep="") - } - con.string <- abbreviate(con.string, 41, strict = TRUE) - txt <- sprintf(" %-41s %8.3f\n", - con.string, slack[i]) - cat(txt) - } - cat("\n") - } - - } # parameter estimates - - - # R-square? - if(rsquare) { - r2 <- lav_object_inspect_rsquare(object, est.std.all=est.std.all, - drop.list.single.group = FALSE, add.labels = TRUE, - add.class = FALSE) - for(g in 1:object@Data@ngroups) { - if(object@Data@ngroups > 1) { - cat("R-Square Group ", g, " [", - object@Data@group.label[[g]], "]", - ":\n\n", sep="") - } else { - cat("R-Square:\n\n") - } - for(i in 1:length(r2[[g]])) { - t1.txt <- sprintf(" %-13s %9.3f\n", names(r2[[g]])[i], - r2[[g]][i]) - cat(t1.txt) - } - if(g < object@Data@ngroups) cat("\n") - } - } - - # modification indices? 
- if(modindices) { - cat("Modification Indices:\n\n") - print( modificationIndices(object, standardized=TRUE) ) - } - -} - - setMethod("coef", "lavaan", function(object, type="free", labels=TRUE) { @@ -745,7 +356,9 @@ pvalue = TRUE, remove.eq = TRUE, remove.ineq = TRUE, - remove.def = FALSE) { + remove.def = FALSE, + GLIST = NULL, + est = NULL) { stopifnot(type %in% c("std.all", "std.lv", "std.nox")) @@ -754,6 +367,15 @@ zstat <- pvalue <- FALSE } + # no se if class is not lavaan + if(class(object) != "lavaan") { + if(missing(se) || !se) { + se <- FALSE + zstat <- FALSE + pvalue <- FALSE + } + } + PARTABLE <- inspect(object, "list") free.idx <- which(PARTABLE$free > 0L) LIST <- PARTABLE[,c("lhs", "op", "rhs")] @@ -763,11 +385,11 @@ # add std and std.all columns if(type == "std.lv") { - LIST$est.std <- standardize.est.lv(object) + LIST$est.std <- standardize.est.lv(object, est = est, GLIST = GLIST) } else if(type == "std.all") { - LIST$est.std <- standardize.est.all(object) + LIST$est.std <- standardize.est.all(object, est = est, GLIST = GLIST) } else if(type == "std.nox") { - LIST$est.std <- standardize.est.all.nox(object) + LIST$est.std <- standardize.est.all.nox(object, est = est, GLIST = GLIST) } if(object@Options$se != "none" && se) { @@ -857,6 +479,19 @@ rsquare = FALSE, add.attributes = FALSE) { + if("lavaan.fsr" %in% class(object)) { + return(object$PE) + } + + # no se if class is not lavaan + if(class(object) != "lavaan") { + if(missing(se) || !se) { + se <- FALSE + zstat <- FALSE + pvalue <- FALSE + } + } + # check fmi if(fmi) { if(inherits(object, "lavaanList")) { @@ -891,6 +526,16 @@ if(!is.null(PARTABLE$user)) { LIST$user <- PARTABLE$user } + if(!is.null(PARTABLE$block)) { + LIST$block <- PARTABLE$block + } else { + LIST$block <- rep(1L, length(LIST$lhs)) + } + if(!is.null(PARTABLE$level)) { + LIST$level <- PARTABLE$level + } else { + LIST$level <- rep(1L, length(LIST$lhs)) + } if(!is.null(PARTABLE$group)) { LIST$group <- PARTABLE$group } else { @@ -1090,7 +735,7 @@ r2 <- lavTech(object, "rsquare", add.labels = TRUE) NAMES <- unlist(lapply(r2, names)); nel <- length(NAMES) R2 <- data.frame( lhs = NAMES, op = rep("r2", nel), rhs = NAMES, - group = rep(1:length(r2), sapply(r2, length)), + block = rep(1:length(r2), sapply(r2, length)), est = unlist(r2), stringsAsFactors = FALSE ) LIST <- lav_partable_merge(pt1 = LIST, pt2 = R2, warn = FALSE) } @@ -1133,9 +778,18 @@ LIST$fmi <- 1-(SE.step2*SE.step2/(SE.orig*SE.orig)) } + # if single level, remove level column + if(object@Data@nlevels == 1L) LIST$level <- NULL + # if single group, remove group column if(object@Data@ngroups == 1L) LIST$group <- NULL + # if single everything, remove block column + if(object@Data@nlevels == 1L && + object@Data@ngroups == 1L) { + LIST$block <- NULL + } + # if no user-defined labels, remove label column if(sum(nchar(object@ParTable$label)) == 0L) LIST$label <- NULL @@ -1176,8 +830,13 @@ attr(LIST, "information") <- object@Options$information attr(LIST, "se") <- object@Options$se attr(LIST, "group.label") <- object@Data@group.label + attr(LIST, "level.label") <- object@Data@level.label attr(LIST, "bootstrap") <- object@Options$bootstrap attr(LIST, "bootstrap.successful") <- bootstrap.successful + attr(LIST, "missing") <- object@Options$missing + attr(LIST, "observed.information") <- + object@Options$observed.information + attr(LIST, "h1.information") <- object@Options$h1.information # FIXME: add more!! 
} else { LIST$exo <- NULL @@ -1259,7 +918,7 @@ stop("lavaan ERROR: vcov not available if se=\"none\"") } - VarCov <- lav_object_inspect_vcov(lavobject = object, + VarCov <- lav_object_inspect_vcov(object, add.labels = labels, add.class = TRUE, remove.duplicated = remove.duplicated) diff -Nru r-cran-lavaan-0.5.22/R/lav_options.R r-cran-lavaan-0.5.23.1097/R/lav_options.R --- r-cran-lavaan-0.5.22/R/lav_options.R 2016-09-21 10:54:39.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_options.R 2017-02-22 10:06:36.000000000 +0000 @@ -1,12 +1,147 @@ # initial version YR 02/08/2010 +# YR 28 Jan 2017: add lavOptions(), lav_options_default() + +# public function +lavOptions <- function(x = NULL, default = NULL, mimic = "lavaan") { + + lavoptions <- lav_options_default(mimic = mimic) + + # selection only + if(!is.null(x)) { + if(is.character(x)) { + # lower case only + x <- tolower(x) + + # check if x is in names(lavoptions) + not.ok <- which(!x %in% names(lavoptions)) + if(length(not.ok) > 0L) { + # only warn if multiple options were requested + if(length(x) > 1L) { + warning("lavaan WARNING: option `", x[not.ok], + "' not available") + } + x <- x[ -not.ok ] + } + + # return requested option(s) + if(length(x) == 0L) { + return(default) + } else { + lavoptions[x] + } + } else { + stop("lavaan ERROR: `x' must be a character string") + } + } else { + lavoptions + } +} + +# set the default options (including unspecified values "default") +lav_options_default <- function(mimic = "lavaan") { + + opt <- list(model.type = "sem", + + # global + mimic = "lavaan", + + # model modifiers + meanstructure = "default", + int.ov.free = FALSE, + int.lv.free = FALSE, + conditional.x = "default", # or FALSE? + fixed.x = "default", # or FALSE? + orthogonal = FALSE, + std.lv = FALSE, + parameterization = "default", + + auto.fix.first = FALSE, + auto.fix.single = FALSE, + auto.var = FALSE, + auto.cov.lv.x = FALSE, + auto.cov.y = FALSE, + auto.th = FALSE, + auto.delta = FALSE, + + # full data + std.ov = FALSE, + missing = "default", + + # summary data + sample.cov.rescale = "default", + ridge = 1e-5, + + # multiple groups + group = NULL, + group.label = NULL, + group.equal = '', + group.partial = '', + group.w.free = FALSE, + + # clusters + cluster = NULL, + level.label = NULL, + + # estimation + estimator = "default", + likelihood = "default", + link = "default", + representation = "default", + do.fit = TRUE, + + # inference + information = "default", + h1.information = "structured", + #h1.information.se = "structured", + #h1.information.test = "structured", + se = "default", + test = "default", + bootstrap = 1000L, + observed.information = "hessian", + + # optimization + control = list(), + optim.method = "nlminb", + optim.method.cor = "nlminb", + optim.force.converged = FALSE, + optim.gradient = "analytic", + optim.init_nelder_mead = FALSE, + + # numerical integration + integration.ngh = 21L, + + # parallel + parallel = "no", + ncpus = 1L, + cl = NULL, + iseed = NULL, + + # zero values + zero.add = "default", + zero.keep.margins = "default", + zero.cell.warn = TRUE, + + # starting values + start = "default", + + # sanity checks + check = c("start", "post"), + + # verbosity + verbose = FALSE, + warn = TRUE, + debug = FALSE) + + opt +} + # this function collects and checks the user-provided options/arguments, # and fills in the "default" values, or changes them in an attempt to # produce a consistent set of values... 
# # returns a list with the named options - -lav_options_set <- function(opt = formals(lavaan)) { +lav_options_set <- function(opt = NULL) { if(opt$debug) { cat("lavaan DEBUG: lavaanOptions IN\n"); str(opt) } @@ -119,7 +254,29 @@ "uls", "ulsm", "ulsmv", "pml")) { stop("lavaan ERROR: missing=\"ml\" is not allowed for estimator MLM, MLMV, GLS, ULS, ULSM, ULSMV, DWLS, WLS, WLSM, WLSMV, PML") } - } else if(opt$missing %in% c("two.stage", "listwise")) { + } else if(opt$missing %in% c("two.stage", "twostage", "two-stage", + "two.step", "twostep", "two-step")) { + opt$missing <- "two.stage" + if(opt$categorical) { + stop("lavaan ERROR: missing=\"two.stage\" not available in the categorical setting") + } + if(opt$estimator %in% c("mlm", "mlmv", "gls", "wls", "wlsm", "wlsmv", + "uls", "ulsm", "ulsmv", "pml", "mml")) { + stop("lavaan ERROR: missing=\"two.stage\" is not allowed for estimator MLM, MLMV, GLS, ULS, ULSM, ULSMV, DWLS, WLS, WLSM, WLSMV, PML, MML") + } + } else if(opt$missing %in% c("robust.two.stage", "robust.twostage", + "robust.two-stage", "robust-two-stage", + "robust.two.step", "robust.twostep", + "robust-two-step")) { + opt$missing <- "robust.two.stage" + if(opt$categorical) { + stop("lavaan ERROR: missing=\"robust.two.stage\" not available in the categorical setting") + } + if(opt$estimator %in% c("mlm", "mlmv", "gls", "wls", "wlsm", "wlsmv", + "uls", "ulsm", "ulsmv", "pml", "mml")) { + stop("lavaan ERROR: missing=\"robust.two.stage\" is not allowed for estimator MLM, MLMV, GLS, ULS, ULSM, ULSMV, DWLS, WLS, WLSM, WLSMV, PML, MML") + } + } else if(opt$missing == "listwise") { # nothing to do } else if(opt$missing == "pairwise") { # nothing to do @@ -142,7 +299,12 @@ # default test statistic if(opt$test == "default") { - opt$test <- "standard" + if(opt$missing == "two.stage" || + opt$missing == "robust.two.stage") { + opt$test <- "satorra.bentler" + } else { + opt$test <- "standard" + } } else if(opt$test %in% c("none", "standard")) { # nothing to do } else if(opt$test == "satorra" || @@ -202,21 +364,72 @@ opt$missing <- "listwise" } + # missing = "two.stage" + if(opt$missing == "two.stage" || + opt$missing == "robust.two.stage") { + opt$meanstructure <- TRUE + # se + if(opt$se == "default") { + if(opt$missing == "two.stage") { + opt$se <- "two.stage" + } else { + opt$se <- "robust.two.stage" + } + } else if(opt$missing == "two.stage" && + opt$se == "two.stage") { + # nothing to do + } else if(opt$missing == "robust.two.stage" && + opt$se == "robust.two.stage") { + # nothing to do + } else { + warning("lavaan WARNING: se will be set to ", + dQuote(opt$missing), " if missing = ", + dQuote(opt$missing) ) + opt$se <- opt$missing + } + # information + if(opt$information == "default") { + # for both two.stage and robust.two.stage + opt$information <- "observed" + } else if(opt$information == "first.order") { + warning("lavaan WARNING: information will be set to ", + dQuote("observed"), " if missing = ", + dQuote(opt$missing) ) + opt$information <- "observed" + } + # observed.information (ALWAYS "h1" for now) + opt$observed.information <- "h1" + # test + if(opt$test == "default" || + opt$test == "satorra.bentler") { + opt$test <- "satorra.bentler" + } else { + warning("lavaan WARNING: test will be set to ", + dQuote("satorra.bentler"), " if missing = ", + dQuote(opt$missing) ) + opt$test <- "satorra.bentler" + } + } + + + # meanstructure if(is.logical(opt$meanstructure)) { if(opt$meanstructure == FALSE) { # user explicitly wants meanstructure == FALSE # check for conflicting arguments - 
if(opt$estimator %in% c("mlm", "mlmv", "mlr", "mlf", "ulsm", "ulsmv", "wlsm", "wlsmv", "pml")) + if(opt$estimator %in% c("mlm", "mlmv", "mlr", "mlf", "ulsm", "ulsmv", "wlsm", "wlsmv", "pml")) { warning("lavaan WARNING: estimator forces meanstructure = TRUE") - if(opt$missing == "ml") + } + if(opt$missing %in% c("ml", "two.stage")) { warning("lavaan WARNING: missing argument forces meanstructure = TRUE") + } } } else if(opt$meanstructure == "default") { # by default: no meanstructure! opt$meanstructure <- FALSE # unless there is a group argument? (added since 0.4-10) - if(!is.null(opt$group)) opt$meanstructure <- TRUE + # if(!is.null(opt$group)) opt$meanstructure <- TRUE } else { stop("meanstructure must be TRUE, FALSE or \"default\"\n") } @@ -231,10 +444,11 @@ # default estimator if(opt$estimator == "default") { - if(opt$categorical) + if(opt$categorical) { opt$estimator <- "wlsmv" - else + } else { opt$estimator <- "ml" + } } # backwards compatibility (0.4 -> 0.5) @@ -245,17 +459,15 @@ opt$estimator <- "ML" if(opt$se == "default") { opt$se <- "standard" - } else if(opt$se == "first.order" || - opt$se == "bootstrap" || - opt$se == "none" || - opt$se == "external" || - opt$se == "standard" || - opt$se == "robust.huber.white" || - opt$se == "robust.sem") { + } else if(opt$se %in% c("first.order","bootstrap", "none", + "external", "standard", "robust.huber.white", + "two.stage", "robust.two.stage", "robust.sem")) { # nothing to do } else if(opt$se == "robust") { if(opt$missing == "ml") { opt$se <- "robust.huber.white" + } else if(opt$missing == "two.stage") { + opt$se <- "robust.two.stage" } else { opt$se <- "robust.sem" } @@ -551,8 +763,8 @@ if(opt$information == "default") { if(opt$missing == "ml" || opt$se == "robust.huber.white" || - opt$se == "first.order" || - nchar(opt$constraints) > 0L) { + opt$se == "first.order") { + #nchar(opt$constraints) > 0L) { opt$information <- "observed" } else { opt$information <- "expected" @@ -563,6 +775,19 @@ stop("information must be either \"expected\" or \"observed\"\n") } + if(opt$h1.information == "structured" || + opt$h1.information == "unstructured") { + # nothing to do + } else { + stop("lavaan ERROR: h1.information must be either \"structured\" or \"unstructured\"\n") + } + #if(opt$h1.information.test == "structured" || + # opt$h1.information.test == "unstructured") { + # # nothing to do + #} else { + # stop("lavaan ERROR: h1.information.se must be either \"structured\" or \"unstructured\"\n") + #} + # check information if se == "robust.sem" if(opt$se == "robust.sem" && opt$information == "observed") { warning("lavaan WARNING: information will be set to ", diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_attributes.R r-cran-lavaan-0.5.23.1097/R/lav_partable_attributes.R --- r-cran-lavaan-0.5.22/R/lav_partable_attributes.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_attributes.R 2017-02-21 09:26:55.000000000 +0000 @@ -0,0 +1,61 @@ +# return 'attributes' of a lavaan partable -- generate a new set if necessary +lav_partable_attributes <- function(partable, pta = NULL) { + + if(is.null(pta)) { + # attached to partable? 
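# NOTE (sketch, not part of this patch): the se = "robust" dispatch above
# resolves the generic value against the missing-data mechanism:
#   missing = "ml"        -> se = "robust.huber.white"
#   missing = "two.stage" -> se = "robust.two.stage"
#   otherwise             -> se = "robust.sem"
# e.g., with `dat' a hypothetical complete data set:
fit <- cfa('f =~ y1 + y2 + y3', data = dat, se = "robust")
fit@Options$se      # "robust.sem" (complete data, so missing != "ml")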
+ pta <- attributes(partable) + if(!is.null(pta$vnames) && !is.null(pta$nvar)) { + # looks like a pta + return(pta) + } else { + pta <- list() + } + } + + # vnames + pta$vnames <- lav_partable_vnames(partable, type="all") + + # vidx + OV <- pta$vnames$ov + LV <- pta$vnames$lv + nblocks <- length(pta$vnames$ov) + pta$vidx <- lapply(names(pta$vnames), function(v) { + lapply(seq_len(nblocks), function(g) { + if(grepl("lv", v)) { + match(pta$vnames[[v]][[g]], LV[[g]]) + } else if(grepl("th", v)) { + # thresholds have '|t' pattern + TH <- sapply(strsplit(pta$vnames[[v]][[g]], + "|t", fixed = TRUE), "[[", 1L) + match(TH, OV[[g]]) + } else if(grepl("eqs", v)){ + # mixture of OV/LV + integer(0L) + } else { + match(pta$vnames[[v]][[g]], OV[[g]]) + } + }) + }) + names(pta$vidx) <- names(pta$vnames) + + # nblocks + pta$nblocks <- nblocks + + # ngroups + pta$ngroups <- lav_partable_ngroups(partable) + + # nlevels + pta$nlevels <- lav_partable_nlevels(partable) + + # nvar + pta$nvar <- lapply(pta$vnames$ov, length) + + # nfac + pta$nfac <- lapply(pta$vnames$lv, length) + + # nfac.nonnormal - for numerical integration + pta$nfac.nonnormal <- lapply(pta$vnames$lv.nonnormal, length) + + pta +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_complete.R r-cran-lavaan-0.5.23.1097/R/lav_partable_complete.R --- r-cran-lavaan-0.5.22/R/lav_partable_complete.R 2016-06-13 12:25:41.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_complete.R 2017-02-21 09:17:07.000000000 +0000 @@ -47,14 +47,18 @@ partable$id <- seq_len(N) #} - # add group column - if(is.null(partable$group)) { - partable$group <- rep(1L, N) + # add block column + if(is.null(partable$block)) { + partable$block <- rep(1L, N) + } else { + partable$block <- as.integer(partable$block) } # add user column if(is.null(partable$user)) { partable$user <- rep(1L, N) + } else { + partable$user <- as.integer( partable$user ) } # add free column @@ -73,9 +77,9 @@ if(is.null(partable$ustart)) { # do we have something else? start? est? 
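# NOTE (sketch, not part of this patch): lav_partable_attributes() above
# regenerates the `pta' bookkeeping when it is handed a plain partable:
pt  <- lavaanify('f =~ y1 + y2 + y3')
pta <- lavaan:::lav_partable_attributes(pt)    # internal function
pta$nvar       # 3 observed variables (per block)
pta$nfac       # 1 latent factor (per block)
pta$nblocks    # 1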
if(!is.null(partable$start)) { - partable$ustart <- partable$start + partable$ustart <- as.numeric(partable$start) } else if(!is.null(partable$est)) { - partable$ustart <- partable$est + partable$ustart <- as.numeric(partable$est) } else { partable$ustart <- rep(as.numeric(NA), N) non.free <- which(!partable$free) @@ -83,16 +87,22 @@ partable$ustart[non.free] <- 0 } } + } else { + partable$ustart <- as.numeric(partable$ustart) } # add exo column if(is.null(partable$exo)) { partable$exo <- rep(0, N) + } else { + partable$exo <- as.integer( partable$exo ) } # add label column if(is.null(partable$label)) { partable$label <- rep("", N) + } else { + partable$label <- as.character( partable$label ) } # add eq.id column @@ -106,7 +116,7 @@ #} # order them nicely: id lhs op rhs group - idx <- match(c("id", "lhs","op","rhs", "group","user", + idx <- match(c("id", "lhs","op","rhs", "block","user", "free","ustart","exo","label"), names(partable)) diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_constraints.R r-cran-lavaan-0.5.23.1097/R/lav_partable_constraints.R --- r-cran-lavaan-0.5.22/R/lav_partable_constraints.R 2016-06-13 16:02:12.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_constraints.R 2016-12-30 17:15:05.000000000 +0000 @@ -230,7 +230,8 @@ # # NOTE: very similar, but not identical to ceq, because we need to take # care of the difference between '<' and '>' -lav_partable_constraints_ciq <- function(partable, con = NULL, debug = FALSE) { +lav_partable_constraints_ciq <- function(partable, con = NULL, debug = FALSE, + txtOnly = FALSE) { # empty function @@ -246,14 +247,22 @@ # get inequality constraints ineq.idx <- which(partable$op == ">" | partable$op == "<") - # catch empty ceq + # catch empty ciq if(length(ineq.idx) == 0L) { - return(cin.function) + if(txtOnly) { + return(character(0L)) + } else { + return(cin.function) + } } # create function formals(cin.function) <- alist(.x.=, ...=) - BODY.txt <- paste("{\nout <- rep(NA, ", length(ineq.idx), ")\n", sep="") + if(txtOnly) { + BODY.txt <- "" + } else { + BODY.txt <- paste("{\nout <- rep(NA, ", length(ineq.idx), ")\n", sep="") + } # first come the variable definitions DEF.txt <- lav_partable_constraints_def(partable, txtOnly=TRUE) @@ -336,6 +345,9 @@ BODY.txt <- paste(BODY.txt, "out[", i, "] <- ", ineq.string, "\n", sep="") } + + if(txtOnly) return(BODY.txt) + # put the results in 'out' #BODY.txt <- paste(BODY.txt, "\nout <- ", # paste("c(", paste(lhs.names, collapse=","),")\n", sep=""), sep="") diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_flat.R r-cran-lavaan-0.5.23.1097/R/lav_partable_flat.R --- r-cran-lavaan-0.5.22/R/lav_partable_flat.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_flat.R 2017-02-20 15:16:25.000000000 +0000 @@ -0,0 +1,545 @@ + +lav_partable_flat <- function(FLAT = NULL, + blocks = "group", + block.id = NULL, + meanstructure = FALSE, + int.ov.free = FALSE, + int.lv.free = FALSE, + orthogonal = FALSE, + std.lv = FALSE, + conditional.x = FALSE, + fixed.x = TRUE, + parameterization = "delta", + auto.fix.first = FALSE, + auto.fix.single = FALSE, + auto.var = FALSE, + auto.cov.lv.x = FALSE, + auto.cov.y = FALSE, + auto.th = FALSE, + auto.delta = FALSE, + varTable = NULL, + group.equal = NULL, + group.w.free = FALSE, + ngroups = 1L) { + + categorical <- FALSE + + ### DEFAULT elements: parameters that are typically not specified by + ### users, but should typically be considered, + ### either free or fixed + + # extract `names' of various types of variables: + lv.names <-
lav_partable_vnames(FLAT, type="lv") # latent variables + #lv.names.r <- lav_partable_vnames(FLAT, type="lv.regular") # regular latent variables + lv.names.f <- lav_partable_vnames(FLAT, type="lv.formative") # formative latent variables + ov.names <- lav_partable_vnames(FLAT, type="ov") # observed variables + ov.names.x <- lav_partable_vnames(FLAT, type="ov.x") # exogenous x covariates + ov.names.nox <- lav_partable_vnames(FLAT, type="ov.nox") + lv.names.x <- lav_partable_vnames(FLAT, type="lv.x") # exogenous lv + ov.names.y <- lav_partable_vnames(FLAT, type="ov.y") # dependent ov + lv.names.y <- lav_partable_vnames(FLAT, type="lv.y") # dependent lv + #lvov.names.y <- c(ov.names.y, lv.names.y) + lvov.names.y <- c(lv.names.y, ov.names.y) + + + # get 'ordered' variables, either from FLAT or varTable + ov.names.ord1 <- lav_partable_vnames(FLAT, type="ov.ord") + # check if we have "|" for exogenous variables + if(length(ov.names.ord1) > 0L) { + idx <- which(ov.names.ord1 %in% ov.names.x) + if(length(idx) > 0L) { + warning("lavaan WARNING: thresholds are defined for exogenous variables: ", paste(ov.names.ord1[idx], collapse=" ")) + } + } + + if(!is.null(varTable)) { + ov.names.ord2 <- as.character(varTable$name[ varTable$type == "ordered" ]) + # remove fixed.x variables + idx <- which(ov.names.ord2 %in% ov.names.x) + if(length(idx) > 0L) { + ov.names.ord2 <- ov.names.ord2[-idx] + } + + # remove those that do appear in the model syntax + idx <- which(!ov.names.ord2 %in% ov.names) + if(length(idx) > 0L) { + ov.names.ord2 <- ov.names.ord2[-idx] + } + } else { + ov.names.ord2 <- character(0) + } + #### FIXME!!!!! ORDER! + ov.names.ord <- unique(c(ov.names.ord1, ov.names.ord2)) + + # if we have the "|" in the model syntax, check the number of thresholds + if(!is.null(varTable) && length(ov.names.ord1) > 0L) { + for(o in ov.names.ord1) { + nth <- varTable$nlev[ varTable$name == o ] - 1L + nth.in.partable <- sum(FLAT$op == "|" & FLAT$lhs == o) + if(nth != nth.in.partable) { + stop("lavaan ERROR: expected ", nth, + " threshold(s) for variable ", + sQuote(o), "; syntax contains ", nth.in.partable, "\n") + } + } + } + + if(length(ov.names.ord) > 0L) + categorical <- TRUE + + lhs <- rhs <- character(0) + + # 1. THRESHOLDS (based on varTable) + # NOTE: - new in 0.5-18: ALWAYS include threshold parameters in partable, + # but only free them if auto.th = TRUE + # - only ov.names.ord2, because ov.names.ord1 are already in USER + # and we only need to add 'default' parameters here + nth <- 0L + #if(auto.th && length(ov.names.ord2) > 0L) { + if(length(ov.names.ord2) > 0L) { + for(o in ov.names.ord2) { + nth <- varTable$nlev[ varTable$name == o ] - 1L + if(nth < 1L) next + lhs <- c(lhs, rep(o, nth)) + rhs <- c(rhs, paste("t", seq_len(nth), sep="")) + } + nth <- length(lhs) + } + + # 2. 
default (residual) variances and covariances + + # a) (residual) VARIANCES (all ov's except exo, and all lv's) + # NOTE: change since 0.5-17: we ALWAYS include the vars in the + # parameter table; but only if auto.var = TRUE, we set them free + #if(auto.var) { + ov.var <- ov.names.nox + # auto-remove ordinal variables + #idx <- match(ov.names.ord, ov.var) + #if(length(idx)) ov.var <- ov.var[-idx] + lhs <- c(lhs, ov.var, lv.names) + rhs <- c(rhs, ov.var, lv.names) + #} + + # b) `independent` latent variable COVARIANCES (lv.names.x) + if(auto.cov.lv.x && length(lv.names.x) > 1L) { + tmp <- utils::combn(lv.names.x, 2) + lhs <- c(lhs, tmp[1,]) # to fill upper.tri + rhs <- c(rhs, tmp[2,]) + } + + # c) `dependent` latent variables COVARIANCES (lv.y.idx + ov.y.lv.idx) + if(auto.cov.y && length(lvov.names.y) > 1L) { + tmp <- utils::combn(lvov.names.y, 2L) + lhs <- c(lhs, tmp[1,]) # to fill upper.tri + rhs <- c(rhs, tmp[2,]) + } + + # d) exogenous x covariates: VARIANCES + COVARIANCES + if(!conditional.x && (nx <- length(ov.names.x)) > 0L) { + idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) + lhs <- c(lhs, rep(ov.names.x, each=nx)[idx]) # fill upper.tri + rhs <- c(rhs, rep(ov.names.x, times=nx)[idx]) + } + + # create 'op' (thresholds come first, then variances) + op <- rep("~~", length(lhs)); op[seq_len(nth)] <- "|" + + # LATENT RESPONSE SCALES (DELTA) + # NOTE: - new in 0.5-19: ALWAYS include scaling parameters in partable, + # but only free them if auto.delta = TRUE (and parameterization + # is "delta" + #if(auto.delta && auto.th && length(ov.names.ord) > 0L && + # # length(lv.names) > 0L && + # (ngroups > 1L || any(FLAT$op == "~*~") || parameterization == "theta")) { + if(length(ov.names.ord) > 0L) { + lhs <- c(lhs, ov.names.ord) + rhs <- c(rhs, ov.names.ord) + op <- c(op, rep("~*~", length(ov.names.ord))) + } + + # 3. INTERCEPTS + if(meanstructure) { + if(conditional.x) { + ov.int <- ov.names.nox + } else { + ov.int <- ov.names + } + # auto-remove ordinal variables + #idx <- which(ov.int %in% ov.names.ord) + #if(length(idx)) ov.int <- ov.int[-idx] + + int.lhs <- c(ov.int, lv.names) + lhs <- c(lhs, int.lhs) + rhs <- c(rhs, rep("", length(int.lhs))) + op <- c(op, rep("~1", length(int.lhs))) + } + + # free group weights + if(group.w.free) { + lhs <- c(lhs, "group") + rhs <- c(rhs, "w") + op <- c(op, "%") + } + + DEFAULT <- data.frame(lhs=lhs, op=op, rhs=rhs, + mod.idx=rep(0L, length(lhs)), + stringsAsFactors=FALSE) + + + # 4. USER: user-specified elements + lhs <- FLAT$lhs + op <- FLAT$op + rhs <- FLAT$rhs + mod.idx <- FLAT$mod.idx + + lv.names <- lav_partable_vnames(FLAT, type="lv") # latent variables + ov.names <- lav_partable_vnames(FLAT, type="ov") # observed variables + + # check order of covariances: we only fill the upper.tri! + cov.idx <- which(op == "~~" & lhs != rhs) + for(i in cov.idx) { + lv.ov.names <- c(lv.names, ov.names) ### FIXME!!! OK?? + lv.idx <- match(c(lhs[i], rhs[i]), lv.ov.names) + if(lv.idx[1] > lv.idx[2]) { # swap! + tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp + } + if(lhs[i] %in% lv.names && rhs[i] %in% lv.names) { + lv.idx <- match(c(lhs[i], rhs[i]), lv.names) + if(lv.idx[1] > lv.idx[2]) { # swap! + tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp + } + } else if(lhs[i] %in% ov.names && rhs[i] %in% ov.names) { + ov.idx <- match(c(lhs[i], rhs[i]), ov.names) + if(ov.idx[1] > ov.idx[2]) { # swap! + tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp + } + } else { # mixed!! # we allow this since 0.4-10 + lv.ov.names <- c(lv.names, ov.names) ### FIXME!!! OK?? 
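# NOTE (sketch, not part of this patch): the swap performed in this loop
# canonicalises user-specified covariances so that only the upper triangle
# is ever stored:
pt <- lavaanify('f =~ y1 + y2 + y3
                 y2 ~~ y1')
subset(pt, op == "~~" & lhs != rhs)    # the row is stored as y1 ~~ y2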
+ lv.idx <- match(c(lhs[i], rhs[i]), lv.ov.names) + if(lv.idx[1] > lv.idx[2]) { # swap! + tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp + } + } + } + + USER <- data.frame(lhs=lhs, op=op, rhs=rhs, mod.idx=mod.idx, + stringsAsFactors=FALSE) + + # check for duplicated elements in USER + TMP <- USER[,1:3] + idx <- which(duplicated(TMP)) + if(length(idx) > 0L) { + txt <- sapply(1:length(idx), function(i) { + paste(" ", TMP[idx[i],"lhs"], + TMP[idx[i], "op"], + TMP[idx[i],"rhs"]) }) + warning("duplicated elements in model syntax have been ignored:\n", + paste(txt, collapse = "\n")) + USER <- USER[-idx,] + } + + # check for duplicated elements in DEFAULT + # - FIXME: can we not avoid this somehow?? + # - for example, if the user model includes 'x1 ~~ x1' + # or 'x1 ~ 1' + # - remove them from DEFAULT + TMP <- rbind(DEFAULT[,1:3], USER[,1:3]) + idx <- which(duplicated(TMP, fromLast=TRUE)) # idx should be in DEFAULT + if(length(idx)) { + for(i in idx) { + flat.idx <- which(USER$lhs == DEFAULT$lhs[i] & + USER$op == DEFAULT$op[i] & + USER$rhs == DEFAULT$rhs[i]) + if(length(flat.idx) != 1L) { + cat("[lavaan DEBUG] idx in TMP: i = ", i, "\n"); print(TMP[i,]) + cat("[lavaan DEBUG] idx in DEFAULT: i = ", i, "\n"); print(DEFAULT[i,]) + cat("[lavaan DEBUG] flat.idx:"); print(flat.idx) + } + } + DEFAULT <- DEFAULT[-idx,] + } + + # now that we have removed all duplicated elements, we can construct + # the LIST for a single group/block + lhs <- c(USER$lhs, DEFAULT$lhs) + op <- c(USER$op, DEFAULT$op) + rhs <- c(USER$rhs, DEFAULT$rhs) + user <- c(rep(1L, length(USER$lhs)), + rep(0L, length(DEFAULT$lhs))) + mod.idx <- c(USER$mod.idx, DEFAULT$mod.idx) + free <- rep(1L, length(lhs)) + ustart <- rep(as.numeric(NA), length(lhs)) + #label <- paste(lhs, op, rhs, sep="") + label <- rep(character(1), length(lhs)) + exo <- rep(0L, length(lhs)) + + # 0a. if auto.th = FALSE, set fix the thresholds + if(!auto.th) { + th.idx <- which(op == "|" & user == 0L) + free[th.idx] <- 0L + } + + # 0b. if auto.var = FALSE, set the unspecified variances to zero + if(!auto.var) { + var.idx <- which(op == "~~" & + lhs == rhs & + user == 0L) + ustart[var.idx] <- 0.0 + free[var.idx] <- 0L + } else { + # 'formative' (residual) variances are set to zero by default + var.idx <- which(op == "~~" & + lhs == rhs & + lhs %in% lv.names.f & + user == 0L) + ustart[var.idx] <- 0.0 + free[var.idx] <- 0L + } + + + # 1. fix metric of regular latent variables + if(std.lv) { + # fix metric by fixing the variance of the latent variable + lv.var.idx <- which(op == "~~" & + lhs %in% lv.names & lhs == rhs) + ustart[lv.var.idx] <- 1.0 + free[lv.var.idx] <- 0L + } + if(auto.fix.first) { + # fix metric by fixing the loading of the first indicator + mm.idx <- which(op == "=~") + first.idx <- mm.idx[which(!duplicated(lhs[mm.idx]))] + ustart[first.idx] <- 1.0 + free[first.idx] <- 0L + } + + # 2. fix residual variance of single indicators to zero + if(auto.var && auto.fix.single) { + mm.idx <- which(op == "=~") + T <- table(lhs[mm.idx]) + if(any(T == 1L)) { + # ok, we have a LV with only a single indicator + lv.names.single <- names(T)[T == 1L] + # get corresponding indicator if unique + lhs.mm <- lhs[mm.idx]; rhs.mm <- rhs[mm.idx] + single.ind <- rhs.mm[which(lhs.mm %in% lv.names.single & + !(duplicated(rhs.mm) | + duplicated(rhs.mm, fromLast=TRUE)))] + # is the indicator unique? 
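# NOTE (sketch, not part of this patch): the two identification strategies
# handled above, side by side:
pt1 <- lavaanify('f =~ y1 + y2 + y3', std.lv = TRUE, auto.var = TRUE)
subset(pt1, lhs == "f" & op == "~~")     # factor variance fixed to 1, free == 0
pt2 <- lavaanify('f =~ y1 + y2 + y3', auto.fix.first = TRUE, auto.var = TRUE)
subset(pt2, op == "=~" & rhs == "y1")    # first loading fixed to 1 instead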
+ if(length(single.ind)) { + var.idx <- which(op == "~~" & lhs %in% single.ind + & rhs %in% single.ind + & lhs == rhs + & user == 0L) + ustart[var.idx] <- 0.0 + free[var.idx] <- 0L + } + } + } + + # 3. orthogonal=TRUE? + if(orthogonal) { + # FIXME: only lv.x.idx for now + lv.cov.idx <- which(op == "~~" & + lhs %in% lv.names & + lhs != rhs & + user == 0L) + ustart[lv.cov.idx] <- 0.0 + free[lv.cov.idx] <- 0L + } + + # 4. intercepts + if(meanstructure) { + if(categorical) { + # zero intercepts/means ordinal variables + ov.int.idx <- which(op == "~1" & + lhs %in% ov.names.ord & + user == 0L) + ustart[ov.int.idx] <- 0.0 + free[ov.int.idx] <- 0L + } + if(int.ov.free == FALSE) { + # zero intercepts/means observed variables + ov.int.idx <- which(op == "~1" & + lhs %in% ov.names & + user == 0L) + ustart[ov.int.idx] <- 0.0 + free[ov.int.idx] <- 0L + } + if(int.lv.free == FALSE) { + # zero intercepts/means latent variables + lv.int.idx <- which(op == "~1" & + lhs %in% lv.names & + user == 0L) + ustart[lv.int.idx] <- 0.0 + free[lv.int.idx] <- 0L + } + } + + # 5. handle exogenous `x' covariates + if(length(ov.names.x) > 0) { + + # 1. variances/covariances + exo.var.idx <- which(op == "~~" & + rhs %in% ov.names.x & + user == 0L) + if(fixed.x) { + ustart[exo.var.idx] <- as.numeric(NA) # should be overridden later! + free[exo.var.idx] <- 0L + exo[exo.var.idx] <- 1L + } else if(conditional.x) { + exo[exo.var.idx] <- 1L + } + + # 2. intercepts + exo.int.idx <- which(op == "~1" & + lhs %in% ov.names.x & + user == 0L) + if(fixed.x) { + ustart[exo.int.idx] <- as.numeric(NA) # should be overridden later! + free[exo.int.idx] <- 0L + exo[exo.int.idx] <- 1L + } else if(conditional.x) { + exo[exo.int.idx] <- 1L + } + + # 3. regressions ov + lv + exo.reg.idx <- which(op == "~" & + lhs %in% c(lv.names, ov.names.nox) & + rhs %in% ov.names.x) + if(conditional.x) { + exo[exo.reg.idx] <- 1L + } + } + + # 5b. residual variances of ordinal variables? + if(length(ov.names.ord) > 0L) { + ord.idx <- which(lhs %in% ov.names.ord & + op == "~~" & + lhs == rhs) + ustart[ord.idx] <- 1L ## FIXME!! or 0?? (0 breaks ex3.12) + free[ord.idx] <- 0L + } + + # 5c. latent response scales of ordinal variables? + # by default, all fixed to 1.0 + if(length(ov.names.ord) > 0L) { + delta.idx <- which(op == "~*~") + ustart[delta.idx] <- 1.0 + free[delta.idx] <- 0L + } + + # group proportions (group 1L) + if(group.w.free) { + group.idx <- which(lhs == "group" & op == "%") + #if(ngroups > 1L) { + free[ group.idx ] <- 1L + ustart[ group.idx ] <- as.numeric(NA) + #} else { + # free[ group.idx ] <- 0L + # ustart[ group.idx ] <- 0.0 # last group + #} + } + + # 6. multiple groups?
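# NOTE (sketch, not part of this patch): effect of the fixed.x branch above --
# exogenous variances, covariances and intercepts are fixed (free = 0) but
# flagged exo = 1, so lavaan later fills them with sample statistics instead
# of estimating them:
pt <- lavaanify('y ~ x1 + x2', meanstructure = TRUE, fixed.x = TRUE,
                auto.var = TRUE)
subset(pt, exo == 1)    # all have free == 0 and ustart == NA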
+ group <- rep(1L, length(lhs)) + if(ngroups > 1) { + group <- rep(1:ngroups, each=length(lhs)) + user <- rep(user, times=ngroups) + lhs <- rep(lhs, times=ngroups) + op <- rep(op, times=ngroups) + rhs <- rep(rhs, times=ngroups) + free <- rep(free, times=ngroups) + ustart <- rep(ustart, times=ngroups) + mod.idx <- rep(mod.idx, times=ngroups) + label <- rep(label, times=ngroups) + exo <- rep(exo, times=ngroups) + + # specific changes per group + for(g in 2:ngroups) { + # label + # label[group == g] <- paste(label[group == 1], ".g", g, sep="") + + # free/fix intercepts + if(meanstructure) { + int.idx <- which(op == "~1" & + lhs %in% lv.names & + user == 0L & + group == g) + if(int.lv.free == FALSE && g > 1 && + ("intercepts" %in% group.equal || + "thresholds" %in% group.equal) && + !("means" %in% group.equal) ) { + free[ int.idx ] <- 1L + ustart[ int.idx ] <- as.numeric(NA) + } + } + + # latent response scaling + if(auto.delta && parameterization == "delta") { + if(any(op == "~*~" & group == g) && + ("thresholds" %in% group.equal)) { + delta.idx <- which(op == "~*~" & group == g) + free[ delta.idx ] <- 1L + ustart[ delta.idx ] <- as.numeric(NA) + } + } else if(parameterization == "theta") { + if(any(op == "~*~" & group == g) && + ("thresholds" %in% group.equal)) { + var.ord.idx <- which(op == "~~" & group == g & + lhs %in% ov.names.ord & lhs == rhs) + free[ var.ord.idx ] <- 1L + ustart[ var.ord.idx ] <- as.numeric(NA) + } + } + + # group proportions + if(group.w.free) { + group.idx <- which(lhs == "group" & op == "%" & group == g) + #if(g == ngroups) { + # free[ group.idx ] <- 0L + # ustart[ group.idx ] <- 0.0 # last group + #} else { + free[ group.idx ] <- 1L + ustart[ group.idx ] <- as.numeric(NA) + #} + } + } # g + } # ngroups + + # construct LIST + LIST <- list( id = seq_along(lhs), + lhs = lhs, + op = op, + rhs = rhs, + user = user) + + # add block column (before group/level columns) + if(!is.null(block.id)) { + # only one block + LIST$block <- rep(block.id, length(lhs)) + } else { + # block is a combination of at least group, level, ... 
+ # for now, only group + LIST$block <- group + } + + # block columns (typically only group) + for(block in blocks) { + if(block == "group") { + LIST[[ block ]] <- group + } else { + LIST[[block]] <- rep(0L, length(lhs)) + } + } + + # other columns + LIST2 <- list(mod.idx = mod.idx, + free = free, + ustart = ustart, + exo = exo, + label = label) + + LIST <- c(LIST, LIST2) +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_full.R r-cran-lavaan-0.5.23.1097/R/lav_partable_full.R --- r-cran-lavaan-0.5.22/R/lav_partable_full.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_full.R 2017-02-21 09:10:22.000000000 +0000 @@ -0,0 +1,174 @@ +# create `full' parameter table, containing (almost) all parameters +# that could be free +# +# main motivation: univariate scores tests (modification indices) +# +lav_partable_full <- function(partable = NULL, + strict.exo = FALSE, + free = FALSE, start = FALSE) { + + # check minimum requirements: lhs, op, rhs + stopifnot( !is.null(partable$lhs), + !is.null(partable$op), + !is.null(partable$rhs) ) + + # meanstructure + meanstructure <- any(partable$op == "~1") + + # number of blocks + nblocks <- lav_partable_nblocks(partable) + + # extract `names' of various types of variables: + lv.names <- lav_partable_vnames(partable, type="lv") + ov.names <- lav_partable_vnames(partable, type="ov") + ov.names.x <- lav_partable_vnames(partable, type="ov.x") + ov.names.nox <- lav_partable_vnames(partable, type="ov.nox") + lv.names.x <- lav_partable_vnames(partable, type="lv.x") + ov.names.y <- lav_partable_vnames(partable, type="ov.y") + lv.names.y <- lav_partable_vnames(partable, type="lv.y") + lvov.names.y <- c(ov.names.y, lv.names.y) + ov.names.ord <- lav_partable_vnames(partable, type="ov.ord") + ov.names.ind <- lav_partable_vnames(partable, type="ov.ind") + + # 1 "=~" + l.lhs <- r.rhs <- op <- character(0) + l.lhs <- rep(lv.names, each=length(ov.names.nox)) + l.rhs <- rep(ov.names.nox, times=length(lv.names)) + + # remove factor ~ eqs.y combinations, if any + # because they also appear as a regression + #bad.idx <- which( l.lhs %in% lv.names & + # l.rhs %in% eqs.y) + #if(length(bad.idx) > 0L) { + # l.lhs <- l.lhs[-bad.idx] + # l.rhs <- l.rhs[-bad.idx] + #} + + l.op <- rep("=~", length(l.lhs)) + + # 2a. "~~" ov ## FIXME: ov.names.nox or ov.names?? + ov.lhs <- ov.rhs <- ov.op <- character(0) + #if(strict.exo) { + OV <- ov.names.nox + #} else { + # OV <- ov.names + #} + nx <- length(OV) + idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) + ov.lhs <- rep(OV, each=nx)[idx] # fill upper.tri + ov.rhs <- rep(OV, times=nx)[idx] + ov.op <- rep("~~", length(ov.lhs)) + + # exo ~~ + if(!strict.exo && length(ov.names.x) > 0L) { + OV <- ov.names.x + nx <- length(OV) + idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) + more.lhs <- rep(OV, each=nx)[idx] # fill upper.tri + more.rhs <- rep(OV, times=nx)[idx] + ov.lhs <- c(ov.lhs, more.lhs) + ov.rhs <- c(ov.rhs, more.rhs) + ov.op <- c(ov.op, rep("~~", length(more.lhs))) + } + + # 2b. "~~" lv + lv.lhs <- lv.rhs <- lv.op <- character(0) + nx <- length(lv.names) + idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) + lv.lhs <- rep(lv.names, each=nx)[idx] # fill upper.tri + lv.rhs <- rep(lv.names, times=nx)[idx] + lv.op <- rep("~~", length(lv.lhs)) + + # 3 regressions? 
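# NOTE (sketch, not part of this patch): lav_partable_full() above enumerates
# (nearly) all parameters that *could* be freed; per its header comment, the
# main consumer is univariate score tests, i.e. modification indices:
fit <- cfa('visual  =~ x1 + x2 + x3
            textual =~ x4 + x5 + x6', data = HolzingerSwineford1939)
head(modindices(fit))   # candidate parameters drawn from the `full' table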
+ r.lhs <- r.rhs <- r.op <- character(0) + if(any(partable$op == "~")) { + + eqs.names <- unique( c(partable$lhs[partable$op == "~"], + partable$rhs[partable$op == "~"]) ) + + eqs.y <- eqs.names + if(strict.exo) { + x.idx <- which(eqs.names %in% ov.names.x) + if(length(x.idx) > 0L) { + eqs.y <- eqs.names[-x.idx] + } + } + eqs.x <- eqs.names + + r.lhs <- rep(eqs.y, each=length(eqs.x)) + r.rhs <- rep(eqs.x, times=length(eqs.y)) + + # remove self-arrows + idx <- which(r.lhs == r.rhs) + r.lhs <- r.lhs[-idx] + r.rhs <- r.rhs[-idx] + + # remove indicator ~ factor if they exist + bad.idx <- which(r.lhs %in% ov.names.ind & + r.rhs %in% lv.names) + if(length(bad.idx) > 0L) { + r.lhs <- r.lhs[-bad.idx] + r.rhs <- r.rhs[-bad.idx] + } + + r.op <- rep("~", length(r.rhs)) + } + + # 4. intercepts + int.lhs <- int.rhs <- int.op <- character(0) + if(meanstructure) { + if(strict.exo) { + int.lhs <- c(ov.names.nox, lv.names) + } else { + int.lhs <- c(ov.names, lv.names) + } + int.rhs <- rep("", length(int.lhs)) + int.op <- rep("~1", length(int.lhs)) + } + + # 5. thresholds + th.lhs <- th.rhs <- th.op <- character(0) + if(length(ov.names.ord) > 0L) { + tmp <- strsplit(lav_partable_vnames(partable, "th"), "\\|") + th.lhs <- sapply(tmp, function(x) x[1]) + th.rhs <- sapply(tmp, function(x) x[2]) + th.op <- rep("|", length(th.lhs)) + } + + # 6. scaling parameters + delta.lhs <- delta.rhs <- delta.op <- character(0) + if(nblocks > 1L && length(ov.names.ord) > 0L) { + delta.lhs <- ov.names.ord + delta.rhs <- ov.names.ord + delta.op <- rep("~*~", length(delta.lhs)) + } + + # combine + lhs <- c(l.lhs, ov.lhs, lv.lhs, r.lhs, int.lhs, th.lhs, delta.lhs) + rhs <- c(l.rhs, ov.rhs, lv.rhs, r.rhs, int.rhs, th.rhs, delta.rhs) + op <- c(l.op, ov.op, lv.op, r.op, int.op, th.op, delta.op) + + + # multiple blocks! 
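# NOTE (sketch, not part of this patch): the replication step that follows
# simply tiles the single-block rows once per block, e.g. 3 base rows and
# nblocks = 2:
rep(1:2, each = 3)                      # block column: 1 1 1 2 2 2
rep(c("f=~y1", "f=~y2", "f=~y3"), 2)    # lhs/op/rhs recycled in the same order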
+ block <- 1L + if(nblocks > 1) { + block <- rep(1:nblocks, each = length(lhs)) + lhs <- rep(lhs, times = nblocks) + op <- rep(op, times = nblocks) + rhs <- rep(rhs, times = nblocks) + } + + LIST <- data.frame(lhs = lhs, op = op, rhs = rhs, block = block, + stringsAsFactors = FALSE) + + if(free) { + LIST$free <- rep(0L, nrow(LIST)) + } + + if(start) { + LIST$start <- rep(0, nrow(LIST)) + } + + LIST +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_independence.R r-cran-lavaan-0.5.23.1097/R/lav_partable_independence.R --- r-cran-lavaan-0.5.22/R/lav_partable_independence.R 2016-03-27 17:58:18.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_independence.R 2017-02-20 19:09:13.000000000 +0000 @@ -273,6 +273,7 @@ op = op, rhs = rhs, user = rep(1L, length(lhs)), + block = group, # for now group = group, mod.idx = rep(0L, length(lhs)), free = free, diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_labels.R r-cran-lavaan-0.5.23.1097/R/lav_partable_labels.R --- r-cran-lavaan-0.5.22/R/lav_partable_labels.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_labels.R 2017-02-21 09:40:47.000000000 +0000 @@ -0,0 +1,154 @@ +# generate labels for each parameter +lav_partable_labels <- function(partable, + blocks = "group", + group.equal = "", group.partial = "", + type = "user") { + + # catch empty partable + if(length(partable$lhs) == 0L) return(character(0L)) + + # default labels + label <- paste(partable$lhs, partable$op, partable$rhs, sep="") + + # handle multiple groups + if("group" %in% blocks) { + if(is.character(partable$group)) { + group.label <- unique(partable$group) + group.label <- group.label[ nchar(group.label) > 0L ] + ngroups <- length(group.label) + } else { + ngroups <- lav_partable_ngroups(partable) + group.label <- 1:ngroups + } + if(ngroups > 1L) { + for(g in 2:ngroups) { + label[partable$group == group.label[g]] <- + paste(label[partable$group == group.label[g]], + ".g", g, sep="") + } + } + } else { + ngroups <- 1L + } + + #cat("DEBUG: label start:\n"); print(label); cat("\n") + #cat("group.equal = ", group.equal, "\n") + #cat("group.partial = ", group.partial, "\n") + + # use group.equal so that equal sets of parameters get the same label + if(ngroups > 1L && length(group.equal) > 0L) { + + if("intercepts" %in% group.equal || + "residuals" %in% group.equal || + "residual.covariances" %in% group.equal) { + ov.names.nox <- vector("list", length=ngroups) + for(g in 1:ngroups) + ov.names.nox[[g]] <- lav_partable_vnames(partable, "ov.nox", group=g) + } + if("thresholds" %in% group.equal) { + ov.names.ord <- vector("list", length=ngroups) + for(g in 1:ngroups) + ov.names.ord[[g]] <- lav_partable_vnames(partable, "ov.ord", group=g) + } + if("means" %in% group.equal || + "lv.variances" %in% group.equal || + "lv.covariances" %in% group.equal) { + lv.names <- vector("list", length=ngroups) + for(g in 1:ngroups) + lv.names[[g]] <- lav_partable_vnames(partable, "lv", group=g) + } + + # g1.flag: TRUE if included, FALSE if not + g1.flag <- logical(length(which(partable$group == 1L))) + + # LOADINGS + if("loadings" %in% group.equal) + g1.flag[ partable$op == "=~" & partable$group == 1L ] <- TRUE + # INTERCEPTS (OV) + if("intercepts" %in% group.equal) + g1.flag[ partable$op == "~1" & partable$group == 1L & + partable$lhs %in% ov.names.nox[[1L]] ] <- TRUE + # THRESHOLDS (OV-ORD) + if("thresholds" %in% group.equal) + g1.flag[ partable$op == "|" & partable$group == 1L & + partable$lhs %in% ov.names.ord[[1L]] ] <- TRUE + # MEANS (LV) + if("means" %in% 
group.equal) + g1.flag[ partable$op == "~1" & partable$group == 1L & + partable$lhs %in% lv.names[[1L]] ] <- TRUE + # REGRESSIONS + if("regressions" %in% group.equal) + g1.flag[ partable$op == "~" & partable$group == 1L ] <- TRUE + # RESIDUAL variances (FIXME: OV ONLY!) + if("residuals" %in% group.equal) + g1.flag[ partable$op == "~~" & partable$group == 1L & + partable$lhs %in% ov.names.nox[[1L]] & + partable$lhs == partable$rhs ] <- TRUE + # RESIDUAL covariances (FIXME: OV ONLY!) + if("residual.covariances" %in% group.equal) + g1.flag[ partable$op == "~~" & partable$group == 1L & + partable$lhs %in% ov.names.nox[[1L]] & + partable$lhs != partable$rhs ] <- TRUE + # LV VARIANCES + if("lv.variances" %in% group.equal) + g1.flag[ partable$op == "~~" & partable$group == 1L & + partable$lhs %in% lv.names[[1L]] & + partable$lhs == partable$rhs ] <- TRUE + # LV COVARIANCES + if("lv.covariances" %in% group.equal) + g1.flag[ partable$op == "~~" & partable$group == 1L & + partable$lhs %in% lv.names[[1L]] & + partable$lhs != partable$rhs ] <- TRUE + + # if group.partial, set corresponding flag to FALSE + if(length(group.partial) > 0L) { + g1.flag[ label %in% group.partial & + partable$group == 1L ] <- FALSE + } + + # for each (constrained) parameter in 'group 1', find a similar one + # in the other groups (we assume here that the models need + # NOT be the same across groups! + g1.idx <- which(g1.flag) + for(i in 1:length(g1.idx)) { + ref.idx <- g1.idx[i] + idx <- which(partable$lhs == partable$lhs[ref.idx] & + partable$op == partable$op[ ref.idx] & + partable$rhs == partable$rhs[ref.idx] & + partable$group > 1L) + label[idx] <- label[ref.idx] + } + } + + #cat("DEBUG: g1.idx = ", g1.idx, "\n") + #cat("DEBUG: label after group.equal:\n"); print(label); cat("\n") + + # handle other block identifier (not 'group') + for(block in blocks) { + if(block == "group") { + next + } + label <- paste(label, ".", partable[[block]], sep = "") + } + + # user-specified labels -- override everything!! + user.idx <- which(nchar(partable$label) > 0L) + label[user.idx] <- partable$label[user.idx] + + #cat("DEBUG: user.idx = ", user.idx, "\n") + #cat("DEBUG: label after user.idx:\n"); print(label); cat("\n") + + # which labels do we need? 
+ if(type == "user") { + idx <- 1:length(label) + } else if(type == "free") { + idx <- which(partable$free > 0L & !duplicated(partable$free)) + #} else if(type == "unco") { + # idx <- which(partable$unco > 0L & !duplicated(partable$unco)) + } else { + stop("argument `type' must be one of free or user") + } + + label[idx] +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_merge.R r-cran-lavaan-0.5.23.1097/R/lav_partable_merge.R --- r-cran-lavaan-0.5.22/R/lav_partable_merge.R 2015-03-05 14:27:52.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_merge.R 2017-02-21 18:20:11.000000000 +0000 @@ -12,23 +12,39 @@ stopifnot( !is.null(pt1$lhs), !is.null(pt1$op), !is.null(pt1$rhs), !is.null(pt2$lhs), !is.null(pt2$op), !is.null(pt2$rhs) ) - # both should have group (or not) - if(is.null(pt1$group) && is.null(pt2$group)) { - TMP <- rbind(pt1[, c("lhs","op","rhs","group")], - pt2[, c("lhs","op","rhs","group")]) + # both should have block (or not) + if(is.null(pt1$block) && is.null(pt2$block)) { + pt1$block <- rep(1L, length(pt1$lhs)) + pt2$block <- rep(1L, length(pt2$lhs)) + TMP <- rbind(pt1[, c("lhs","op","rhs","block")], + pt2[, c("lhs","op","rhs","block")]) } else { - if(is.null(pt1$group) && !is.null(pt2$group)) { - pt1$group <- rep(1L, length(pt1$lhs)) - } else if(is.null(pt2$group) && !is.null(pt1$group)) { - pt2$group <- rep(1L, length(pt2$lhs)) + if(is.null(pt1$block) && !is.null(pt2$block)) { + pt1$block <- rep(1L, length(pt1$lhs)) + } else if(is.null(pt2$block) && !is.null(pt1$block)) { + pt2$block <- rep(1L, length(pt2$lhs)) } - TMP <- rbind(pt1[, c("lhs","op","rhs","group")], - pt2[, c("lhs","op","rhs","group")]) + TMP <- rbind(pt1[, c("lhs","op","rhs","block")], + pt2[, c("lhs","op","rhs","block")]) } # if missing columns, provide default values of the right type # (numeric/integer/character) + # group + if(is.null(pt1$group) && !is.null(pt2$group)) { + pt1$group <- rep(0L, length(pt1$lhs)) + } else if(is.null(pt2$group) && !is.null(pt1$group)) { + pt2$group <- rep(0L, length(pt2$lhs)) + } + + # level + if(is.null(pt1$level) && !is.null(pt2$level)) { + pt1$level <- rep(0L, length(pt1$lhs)) + } else if(is.null(pt2$level) && !is.null(pt1$level)) { + pt2$level <- rep(0L, length(pt2$lhs)) + } + # user if(is.null(pt1$user) && !is.null(pt2$user)) { pt1$user <- rep(0L, length(pt1$lhs)) @@ -111,7 +127,7 @@ idx <- which(pt2$lhs == pt1$lhs[i] & pt2$op == pt1$op[i] & pt2$rhs == pt1$rhs[i] & - pt2$group == pt1$group[i]) + pt2$block == pt1$block[i]) pt2$start[idx] <- pt1$start[i] } diff -Nru r-cran-lavaan-0.5.22/R/lav_partable.R r-cran-lavaan-0.5.23.1097/R/lav_partable.R --- r-cran-lavaan-0.5.22/R/lav_partable.R 2016-08-29 07:59:38.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable.R 2017-02-20 18:07:01.000000000 +0000 @@ -1,4 +1,4 @@ -# constructor for the lavParTable model description +# constructor for the lavParTable model description # # initial version: YR 22/05/2009 # major revision: YR 02/11/2010: - FLATTEN the model syntax and turn it into a @@ -85,7 +85,7 @@ # of exogenous variables in model syntax (if fixed.x=TRUE) if(fixed.x) { # we ignore the groups here! # we only call this function for the warning message - tmp <- vnames(FLAT, "ov.x", warn=TRUE); rm(tmp) + tmp <- lav_partable_vnames(FLAT, "ov.x", warn = TRUE); rm(tmp) } # auto=TRUE? @@ -139,24 +139,15 @@ collapse = ", ")) } - # if multiple blocks, and ":rhs" is missing, fill in integers - # but only if there is just one type of block!
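# NOTE (sketch, not part of this patch): lav_partable_labels(), completed
# above, builds default lhs/op/rhs labels, appends ".g2", ".g3", ... for
# groups beyond the first, and lets group.equal constraints reuse the
# group-1 label:
pt <- lavaanify('f =~ y1 + y2 + y3', ngroups = 2)
lavaan:::lav_partable_labels(pt, group.equal = "loadings")   # internal function
# -> group-2 loadings carry the group-1 labels ("f=~y1", "f=~y2", "f=~y3")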
- #if(length(BLOCK.lhs) == 1L) { - # block.idx <- which(FLAT$op == ":" & FLAT$lhs == BLOCK.lhs[1]) - # FLAT$rhs[block.idx][nchar(FLAT$rhs[block.idx]) == 0L] <- - # seq_along(block.idx) - #} else { - # no empty :rhs fields allowed! - if( any( nchar(FLAT$rhs[BLOCK.op.idx]) == 0L ) ) { - empty.idx <- nchar(FLAT$rhs[BLOCK.op.idx]) == 0L - txt <- paste(FLAT$lhs[BLOCK.op.idx][empty.idx], ":") - stop("lavaan ERROR: syntax contains block identifiers with ", - "missing numbers/labels:\n\t\t", txt) - } - #} - + # no empty :rhs fields allowed! + if( any( nchar(FLAT$rhs[BLOCK.op.idx]) == 0L ) ) { + empty.idx <- nchar(FLAT$rhs[BLOCK.op.idx]) == 0L + txt <- paste(FLAT$lhs[BLOCK.op.idx][empty.idx], ":") + stop("lavaan ERROR: syntax contains block identifiers with ", + "missing numbers/labels:\n\t\t", txt) + } - # check for 'group' + # check for 'group' (needed?) if("group" %in% BLOCK.lhs) { # how many group blocks? group.block.idx <- FLAT$op == ":" & FLAT$lhs == "group" @@ -173,21 +164,27 @@ FLAT <- as.data.frame(FLAT, stringsAsFactors = FALSE) BLOCK.op.idx <- c(BLOCK.op.idx, nrow(FLAT) + 1L) BLOCK.rhs <- rep("0", length(BLOCK.lhs)) + block.id <- 0L - for(g in seq_len(n.block.flat)) { + for(block in seq_len(n.block.flat)) { # fill BLOCK.rhs value - block.lhs <- FLAT$lhs[BLOCK.op.idx[g]] - block.rhs <- FLAT$rhs[BLOCK.op.idx[g]] + block.lhs <- FLAT$lhs[BLOCK.op.idx[block]] + block.rhs <- FLAT$rhs[BLOCK.op.idx[block]] BLOCK.rhs[ which(block.lhs == BLOCK.lhs) ] <- block.rhs # another block identifier? - if(BLOCK.op.idx[g+1] - BLOCK.op.idx[g] == 1L) { + if(BLOCK.op.idx[block+1] - BLOCK.op.idx[block] == 1L) { next } + block.id <- block.id + 1L + - FLAT.block <- FLAT[(BLOCK.op.idx[g]+1L):(BLOCK.op.idx[g+1]-1L),] + FLAT.block <- FLAT[(BLOCK.op.idx[block]+1L):(BLOCK.op.idx[block+1]-1L),] + # rm 'block' column (if any) in FLAT.block + FLAT.block$block <- NULL LIST.block <- lav_partable_flat(FLAT.block, blocks = BLOCK.lhs, + block.id = block.id, meanstructure = meanstructure, int.ov.free = int.ov.free, int.lv.free = int.lv.free, orthogonal = orthogonal, std.lv = std.lv, @@ -429,58 +426,57 @@ # count free parameters idx.free <- which(LIST$free > 0) LIST$free[idx.free] <- seq_along(idx.free) + # backwards compatibility... if(!is.null(LIST$unco)) { LIST$unco[idx.free] <- seq_along(idx.free) } - # 2. add free counter to this element - #idx.equal <- which(LIST$eq.id > 0) - #LIST$free[idx.equal] <- LIST$free[ LIST$eq.id[idx.equal] ] - - # 3. which parameters would be free without equality constraints? - #idx.unco <- which(LIST$free > 0) - #LIST$unco[idx.unco] <- seq_along(idx.unco) # handle constraints (if any) (NOT per group, but overall - 0.4-11) if(length(CON) > 0L) { - #cat("DEBUG:\n"); print(CON) - CONLIST <- list() - CONLIST$id = length(LIST$id) + seq_len(length(CON)) - CONLIST$lhs = unlist(lapply(CON, "[[", "lhs")) - CONLIST$op = unlist(lapply(CON, "[[", "op")) - CONLIST$rhs = unlist(lapply(CON, "[[", "rhs")) - CONLIST$user = unlist(lapply(CON, "[[", "user")) + nCon <- length(CON) + IDX <- length(LIST$id) + seq_len(nCon) + # grow LIST with length(CON) extra rows + LIST <- lapply(LIST, function(x) { + if(is.character(x)) { + c(x, rep("", nCon)) + } else { + c(x, rep(NA, nCon)) + } }) - # add block columns with current values in BLOCK.rhs - if(n.block.flat > 0L) { - for(b in seq_len(length(BLOCK.lhs))) { - block.lhs <- BLOCK.lhs[b] - if(is.character(LIST[[block.lhs]])) { - CONLIST[[block.lhs]] <- rep("", length(CONLIST$lhs)) - } else { - # what shall we do: NA or 0L?
- CONLIST[[block.lhs]] <- rep(as.integer(NA), - length(CONLIST$lhs)) - } + # fill in some columns + LIST$id[IDX] <- IDX + LIST$lhs[IDX] <- unlist(lapply(CON, "[[", "lhs")) + LIST$op[IDX] <- unlist(lapply(CON, "[[", "op")) + LIST$rhs[IDX] <- unlist(lapply(CON, "[[", "rhs")) + LIST$user[IDX] <- unlist(lapply(CON, "[[", "user")) + + # zero is nicer? + LIST$free[IDX] <- rep(0L, nCon) + LIST$exo[IDX] <- rep(0L, nCon) + LIST$block[IDX] <- rep(0L, nCon) + + if(!is.null(LIST$group)) { + if(is.character(LIST$group)) { + LIST$group[IDX] <- rep("", nCon) + } else { + LIST$group[IDX] <- rep(0L, nCon) + } + } + if(!is.null(LIST$level)) { + if(is.character(LIST$level)) { + LIST$level[IDX] <- rep("", nCon) + } else { + LIST$level[IDX] <- rep(0L, nCon) + } + } + if(!is.null(LIST$class)) { + if(is.character(LIST$class)) { + LIST$class[IDX] <- rep("", nCon) + } else { + LIST$class[IDX] <- rep(0L, nCon) } - } else { - CONLIST$group <- rep(0L, length(CONLIST$lhs)) } - - CONLIST$ustart <- rep(as.numeric(NA), length(CONLIST$lhs)) - #if(!is.null(LIST$prior)) { - # LIST$prior <- c(LIST$prior, rep("", length(lhs)) ) - #} - #LIST$plabel <- c(LIST$plabel, rep("", length(lhs)) ) - #if(!is.null(LIST$eq.id)) { - # LIST$eq.id <- c(LIST$eq.id, rep(0L, length(lhs)) ) - #} - #if(!is.null(LIST$unco)) { - # LIST$unco <- c(LIST$unco, rep(0L, length(lhs)) ) - #} - - # merge - LIST <- lav_partable_merge(LIST, CONLIST) } # put lhs of := elements in label column @@ -496,1099 +492,8 @@ # data.frame? if(as.data.frame.) { LIST <- as.data.frame(LIST, stringsAsFactors = FALSE) - - # order? first by 'op', then by 'user' - #LIST[with(LIST, order(op, -user)),] - } - - LIST -} - - -# lav_partable (old name: utils-user.R) -# -# functions to generate/compute/extract information from the lavaan -# `parameter table' -# -# YR. 29 june 2013 (as lav_partable) - -# user visible function to add 'matrix' entries in the parameter table -lavMatrixRepresentation <- function(partable, representation = "LISREL", - add.attributes = FALSE, - as.data.frame. = TRUE) { - - # check parameter table - partable <- lav_partable_complete(partable) - - # get model matrices - if(representation == "LISREL") { - REP <- representation.LISREL(partable, target = NULL, - extra = add.attributes) - } else { - stop("lavaan ERROR: only representation \"LISREL\" has been implemented.") - } - - partable$mat <- REP$mat - partable$row <- REP$row - partable$col <- REP$col - - if(as.data.frame.) { - partable <- as.data.frame(partable, stringsAsFactors=FALSE) - class(partable) <- c("lavaan.data.frame", "data.frame") - } - - if(add.attributes) { - attr(partable, "ov.dummy.names.nox") <- attr(REP, "ov.dummy.names.nox") - attr(partable, "ov.dummy.names.x") <- attr(REP, "ov.dummy.names.x") - attr(partable, "mmNames") <- attr(REP, "mmNames") - attr(partable, "mmNumber") <- attr(REP, "mmNumber") - attr(partable, "mmRows") <- attr(REP, "mmRows") - attr(partable, "mmCols") <- attr(REP, "mmCols") - attr(partable, "mmDimNames") <- attr(REP, "mmDimNames") - attr(partable, "mmSymmetric") <- attr(REP, "mmSymmetric") - } - - partable -} - - -# return 'attributes' of a lavaan partable -- generate a new set if necessary -lav_partable_attributes <- function(partable, pta=NULL) { - - if(is.null(pta)) { - # attached to partable? 
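# NOTE (sketch, not part of this patch): with the rewrite above, explicit
# constraints are appended to the partable itself as extra rows (free = 0,
# block = 0) instead of being built in a separate CONLIST and merged back in:
pt <- lavaanify('f =~ y1 + a*y2 + b*y3
                 a == b')
tail(pt, 1)    # the constraint row: lhs = "a", op = "==", rhs = "b", block = 0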
- pta <- attributes(partable) - if(!is.null(pta$vnames) && !is.null(pta$ngroups)) { - # looks like a pta - return(pta) - } else { - pta <- list() - } - } - - # vnames - pta$vnames <- lav_partable_vnames(partable, type="all", group="list") - - # vidx - OV <- pta$vnames$ov - LV <- pta$vnames$lv - ngroups <- length(pta$vnames$ov) - pta$vidx <- lapply(names(pta$vnames), function(v) { - lapply(seq_len(ngroups), function(g) { - if(grepl("lv", v)) { - match(pta$vnames[[v]][[g]], LV[[g]]) - } else if(grepl("th", v)) { - # thresholds have '|t' pattern - TH <- sapply(strsplit(pta$vnames[[v]][[g]], - "|t", fixed = TRUE), "[[", 1L) - match(TH, OV[[g]]) - } else if(grepl("eqs", v)){ - # mixture of OV/LV - integer(0L) - } else { - match(pta$vnames[[v]][[g]], OV[[g]]) - } - }) - }) - names(pta$vidx) <- names(pta$vnames) - - # ngroups - pta$ngroups <- ngroups - - # nvar - pta$nvar <- lapply(pta$vnames$ov, length) - - # nfac - pta$nfac <- lapply(pta$vnames$lv, length) - - # nfac.nonnormal - for numerical integration - pta$nfac.nonnormal <- lapply(pta$vnames$lv.nonnormal, length) - - pta -} - - -lav_partable_ov_idx <- function(partable, type="th", group=NULL) { - - stopifnot(is.list(partable), !missing(type), - type %in% c("th")) - - if(type == "th") { - ovn <- lav_partable_vnames(partable, type="ov.nox", group=group) - ov.num <- lav_partable_vnames(partable, type="ov.num", group=group) - th <- lav_partable_vnames(partable, type="th.mean", group=group) - th[th %in% ov.num] <- "__NUM__" - th1 <- gsub("\\|t[0-9]*","",th) - out <- match(th1, ovn) - out[is.na(out)] <- 0 - } - - out -} - -lav_partable_ndat <- function(partable, group=NULL) { - - # ngroups - if(is.null(partable$group)) { - partable$group <- rep(1L, length(partable$lhs)) - ngroups <- 1L - } else { - if(is.character(partable$group)) { - group.label <- unique(partable$group) - group.label <- group.label[ nchar(group.label) > 0L ] - ngroups <- length(group.label) - } else { - ngroups <- max(partable$group) - } - } - - meanstructure <- any(partable$op == "~1") - fixed.x <- any(partable$exo > 0L & partable$free == 0L) - conditional.x <- any(partable$exo > 0L & partable$op == "~") - categorical <- any(partable$op == "|") - if(categorical) meanstructure <- TRUE - - if(conditional.x) { - ov.names <- lav_partable_vnames(partable, "ov.nox", group=group) - } else { - ov.names <- lav_partable_vnames(partable, "ov", group=group) - } - nvar <- length(ov.names) - - pstar <- nvar*(nvar+1)/2; if(meanstructure) pstar <- pstar + nvar - ndat <- ngroups*pstar - - # correction for fixed.x? - if(!conditional.x && fixed.x) { - ov.names.x <- lav_partable_vnames(partable, "ov.x", group=group) - nvar.x <- length(ov.names.x) - pstar.x <- nvar.x * (nvar.x + 1) / 2 - if(meanstructure) pstar.x <- pstar.x + nvar.x - ndat <- ndat - (ngroups * pstar.x) - } - - # correction for ordinal data? 
- if(categorical) { - ov.names.x <- lav_partable_vnames(partable, "ov.x", group=group) - nexo <- length(ov.names.x) - ov.ord <- lav_partable_vnames(partable, "ov.ord", group=group) - nvar.ord <- length(ov.ord) - th <- lav_partable_vnames(partable, "th", group=group) - nth <- length(th) - # no variances - ndat <- ndat - (ngroups * nvar.ord) - # no means - ndat <- ndat - (ngroups * nvar.ord) - # but additional thresholds - ndat <- ndat + (ngroups * nth) - # add slopes - ndat <- ndat + (ngroups * nvar * nexo) - } - - # correction for conditional.x not categorical - if(conditional.x && !categorical) { - ov.names.x <- lav_partable_vnames(partable, "ov.x", group=group) - nexo <- length(ov.names.x) - # add slopes - ndat <- ndat + (ngroups * nvar * nexo) - } - - # correction for group proportions? - group.idx <- which(partable$lhs == "group" & - partable$op == "%") - if(length(group.idx) > 0L) { - # ndat <- ndat + (length(group.idx) - 1L) # G - 1 (sum to one) - ndat <- ndat + length(group.idx) # poisson: each cell a parameter - } - - ndat -} - -lav_partable_npar <- function(partable) { - npar <- max(partable$free) - npar -} - -lav_partable_df <- function(partable, group=NULL) { - - npar <- lav_partable_npar(partable) - ndat <- lav_partable_ndat(partable, group=group) - - # degrees of freedom - df <- ndat - npar - - as.integer(df) -} - -lav_partable_labels <- function(partable, - blocks = "group", - group.equal = "", group.partial = "", - type = "user") { - - # catch empty partable - if(length(partable$lhs) == 0L) return(character(0L)) - - # default labels - label <- paste(partable$lhs, partable$op, partable$rhs, sep="") - - # handle multiple groups - if("group" %in% blocks) { - if(is.character(partable$group)) { - group.label <- unique(partable$group) - group.label <- group.label[ nchar(group.label) > 0L ] - ngroups <- length(group.label) - } else { - ngroups <- max(partable$group) - group.label <- 1:ngroups - } - if(ngroups > 1L) { - for(g in 2:ngroups) { - label[partable$group == group.label[g]] <- - paste(label[partable$group == group.label[g]], - ".g", g, sep="") - } - } - } else { - ngroups <- 1L - } - - #cat("DEBUG: label start:\n"); print(label); cat("\n") - #cat("group.equal = ", group.equal, "\n") - #cat("group.partial = ", group.partial, "\n") - - # use group.equal so that equal sets of parameters get the same label - if(ngroups > 1L && length(group.equal) > 0L) { - - if("intercepts" %in% group.equal || - "residuals" %in% group.equal || - "residual.covariances" %in% group.equal) { - ov.names.nox <- vector("list", length=ngroups) - for(g in 1:ngroups) - ov.names.nox[[g]] <- lav_partable_vnames(partable, "ov.nox", group=g) - } - if("thresholds" %in% group.equal) { - ov.names.ord <- vector("list", length=ngroups) - for(g in 1:ngroups) - ov.names.ord[[g]] <- lav_partable_vnames(partable, "ov.ord", group=g) - } - if("means" %in% group.equal || - "lv.variances" %in% group.equal || - "lv.covariances" %in% group.equal) { - lv.names <- vector("list", length=ngroups) - for(g in 1:ngroups) - lv.names[[g]] <- lav_partable_vnames(partable, "lv", group=g) - } - - # g1.flag: TRUE if included, FALSE if not - g1.flag <- logical(length(which(partable$group == 1L))) - - # LOADINGS - if("loadings" %in% group.equal) - g1.flag[ partable$op == "=~" & partable$group == 1L ] <- TRUE - # INTERCEPTS (OV) - if("intercepts" %in% group.equal) - g1.flag[ partable$op == "~1" & partable$group == 1L & - partable$lhs %in% ov.names.nox[[1L]] ] <- TRUE - # THRESHOLDS (OV-ORD) - if("thresholds" %in% group.equal) - g1.flag[ 
partable$op == "|" & partable$group == 1L & - partable$lhs %in% ov.names.ord[[1L]] ] <- TRUE - # MEANS (LV) - if("means" %in% group.equal) - g1.flag[ partable$op == "~1" & partable$group == 1L & - partable$lhs %in% lv.names[[1L]] ] <- TRUE - # REGRESSIONS - if("regressions" %in% group.equal) - g1.flag[ partable$op == "~" & partable$group == 1L ] <- TRUE - # RESIDUAL variances (FIXME: OV ONLY!) - if("residuals" %in% group.equal) - g1.flag[ partable$op == "~~" & partable$group == 1L & - partable$lhs %in% ov.names.nox[[1L]] & - partable$lhs == partable$rhs ] <- TRUE - # RESIDUAL covariances (FIXME: OV ONLY!) - if("residual.covariances" %in% group.equal) - g1.flag[ partable$op == "~~" & partable$group == 1L & - partable$lhs %in% ov.names.nox[[1L]] & - partable$lhs != partable$rhs ] <- TRUE - # LV VARIANCES - if("lv.variances" %in% group.equal) - g1.flag[ partable$op == "~~" & partable$group == 1L & - partable$lhs %in% lv.names[[1L]] & - partable$lhs == partable$rhs ] <- TRUE - # LV COVARIANCES - if("lv.covariances" %in% group.equal) - g1.flag[ partable$op == "~~" & partable$group == 1L & - partable$lhs %in% lv.names[[1L]] & - partable$lhs != partable$rhs ] <- TRUE - - # if group.partial, set corresponding flag to FALSE - if(length(group.partial) > 0L) { - g1.flag[ label %in% group.partial & - partable$group == 1L ] <- FALSE - } - - # for each (constrained) parameter in 'group 1', find a similar one - # in the other groups (we assume here that the models need - # NOT be the same across groups! - g1.idx <- which(g1.flag) - for(i in 1:length(g1.idx)) { - ref.idx <- g1.idx[i] - idx <- which(partable$lhs == partable$lhs[ref.idx] & - partable$op == partable$op[ ref.idx] & - partable$rhs == partable$rhs[ref.idx] & - partable$group > 1L) - label[idx] <- label[ref.idx] - } - } - - #cat("DEBUG: g1.idx = ", g1.idx, "\n") - #cat("DEBUG: label after group.equal:\n"); print(label); cat("\n") - - # handle other block identifier (not 'group') - for(block in blocks) { - if(block == "group") { - next - } - label <- paste(label, ".", partable[[block]], sep = "") - } - - # user-specified labels -- override everything!! - user.idx <- which(nchar(partable$label) > 0L) - label[user.idx] <- partable$label[user.idx] - - #cat("DEBUG: user.idx = ", user.idx, "\n") - #cat("DEBUG: label after user.idx:\n"); print(label); cat("\n") - - # which labels do we need? - if(type == "user") { - idx <- 1:length(label) - } else if(type == "free") { - idx <- which(partable$free > 0L & !duplicated(partable$free)) - #} else if(type == "unco") { - # idx <- which(partable$unco > 0L & !duplicated(partable$unco)) - } else { - stop("argument `type' must be one of free or user") - } - - label[idx] -} - -# only for simsem .... 
-getParameterLabels <- lav_partable_labels - - -lav_partable_full <- function(partable = NULL, group = NULL, - strict.exo = FALSE, - free = FALSE, start = FALSE) { - - # check minimum requirements: lhs, op, rhs - stopifnot( !is.null(partable$lhs), - !is.null(partable$op), - !is.null(partable$rhs) ) - - # meanstructure - meanstructure <- any(partable$op == "~1") - - # number of groups - if(!is.null(partable$group)) { - if(is.character(partable$group)) { - group.label <- unique(partable$group) - group.label <- group.label[ nchar(group.label) > 0L ] - ngroups <- length(group.label) - } else { - ngroups <- max(partable$group) - } - } else { - ngroups <- 1L - } - - # extract `names' of various types of variables: - lv.names <- lav_partable_vnames(partable, type="lv", group=group) # latent variables - ov.names <- lav_partable_vnames(partable, type="ov", group=group) # observed variables - ov.names.x <- lav_partable_vnames(partable, type="ov.x",group=group) # exogenous x covariates - ov.names.nox <- lav_partable_vnames(partable, type="ov.nox",group=group) # ov's without exo's - lv.names.x <- lav_partable_vnames(partable, type="lv.x",group=group) # exogenous lv - ov.names.y <- lav_partable_vnames(partable, type="ov.y",group=group) # dependent ov - lv.names.y <- lav_partable_vnames(partable, type="lv.y",group=group) # dependent lv - lvov.names.y <- c(ov.names.y, lv.names.y) - ov.names.ord <- lav_partable_vnames(partable, type="ov.ord", group=group) - - # eqs.y - # eqs.y <- lav_partable_vnames(partable, type="eqs.y", group=group) - ov.names.ind <- lav_partable_vnames(partable, type="ov.ind", group=group) - - # 1 "=~" - l.lhs <- r.rhs <- op <- character(0) - l.lhs <- rep(lv.names, each=length(ov.names.nox)) - l.rhs <- rep(ov.names.nox, times=length(lv.names)) - - # remove factor ~ eqs.y combinations, if any - # because they also appear as a regression - #bad.idx <- which( l.lhs %in% lv.names & - # l.rhs %in% eqs.y) - #if(length(bad.idx) > 0L) { - # l.lhs <- l.lhs[-bad.idx] - # l.rhs <- l.rhs[-bad.idx] - #} - - l.op <- rep("=~", length(l.lhs)) - - # 2a. "~~" ov ## FIXME: ov.names.nox or ov.names?? - ov.lhs <- ov.rhs <- ov.op <- character(0) - #if(strict.exo) { - OV <- ov.names.nox - #} else { - # OV <- ov.names - #} - nx <- length(OV) - idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) - ov.lhs <- rep(OV, each=nx)[idx] # fill upper.tri - ov.rhs <- rep(OV, times=nx)[idx] - ov.op <- rep("~~", length(ov.lhs)) - - # exo ~~ - if(!strict.exo && length(ov.names.x) > 0L) { - OV <- ov.names.x - nx <- length(OV) - idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) - more.lhs <- rep(OV, each=nx)[idx] # fill upper.tri - more.rhs <- rep(OV, times=nx)[idx] - ov.lhs <- c(ov.lhs, more.lhs) - ov.rhs <- c(ov.rhs, more.rhs) - ov.op <- c(ov.op, rep("~~", length(more.lhs))) - } - - # 2b. "~~" lv - lv.lhs <- lv.rhs <- lv.op <- character(0) - nx <- length(lv.names) - idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) - lv.lhs <- rep(lv.names, each=nx)[idx] # fill upper.tri - lv.rhs <- rep(lv.names, times=nx)[idx] - lv.op <- rep("~~", length(lv.lhs)) - - # 3 regressions? 
- r.lhs <- r.rhs <- r.op <- character(0) - if(any(partable$op == "~")) { - - eqs.names <- unique( c(partable$lhs[partable$op == "~"], - partable$rhs[partable$op == "~"]) ) - - eqs.y <- eqs.names - if(strict.exo) { - x.idx <- which(eqs.names %in% ov.names.x) - if(length(x.idx) > 0L) { - eqs.y <- eqs.names[-x.idx] - } - } - eqs.x <- eqs.names - - r.lhs <- rep(eqs.y, each=length(eqs.x)) - r.rhs <- rep(eqs.x, times=length(eqs.y)) - - # remove self-arrows - idx <- which(r.lhs == r.rhs) - r.lhs <- r.lhs[-idx] - r.rhs <- r.rhs[-idx] - - # remove indicator ~ factor if they exist - bad.idx <- which(r.lhs %in% ov.names.ind & - r.rhs %in% lv.names) - if(length(bad.idx) > 0L) { - r.lhs <- r.lhs[-bad.idx] - r.rhs <- r.rhs[-bad.idx] - } - - r.op <- rep("~", length(r.rhs)) - } - - # 4. intercepts - int.lhs <- int.rhs <- int.op <- character(0) - if(meanstructure) { - if(strict.exo) { - int.lhs <- c(ov.names.nox, lv.names) - } else { - int.lhs <- c(ov.names, lv.names) - } - int.rhs <- rep("", length(int.lhs)) - int.op <- rep("~1", length(int.lhs)) - } - - # 5. thresholds - th.lhs <- th.rhs <- th.op <- character(0) - if(length(ov.names.ord) > 0L) { - tmp <- strsplit(lav_partable_vnames(partable, "th", group=group), "\\|") - th.lhs <- sapply(tmp, function(x) x[1]) - th.rhs <- sapply(tmp, function(x) x[2]) - th.op <- rep("|", length(th.lhs)) - } - - # 6. scaling parameters - delta.lhs <- delta.rhs <- delta.op <- character(0) - if(ngroups > 1L && length(ov.names.ord) > 0L) { - delta.lhs <- ov.names.ord - delta.rhs <- ov.names.ord - delta.op <- rep("~*~", length(delta.lhs)) - } - - # combine - lhs <- c(l.lhs, ov.lhs, lv.lhs, r.lhs, int.lhs, th.lhs, delta.lhs) - rhs <- c(l.rhs, ov.rhs, lv.rhs, r.rhs, int.rhs, th.rhs, delta.rhs) - op <- c(l.op, ov.op, lv.op, r.op, int.op, th.op, delta.op) - - - # multiple groups! 
- group <- 1L - if(ngroups > 1) { - group <- rep(1:ngroups, each=length(lhs)) - lhs <- rep(lhs, times=ngroups) - op <- rep(op, times=ngroups) - rhs <- rep(rhs, times=ngroups) - } - - LIST <- data.frame(lhs=lhs, op=op, rhs=rhs, group=group, - stringsAsFactors=FALSE) - - if(free) { - LIST$free <- rep(0L, nrow(LIST)) - } - - if(start) { - LIST$start <- rep(0, nrow(LIST)) } LIST } -lav_partable_flat <- function(FLAT = NULL, - blocks = "group", - meanstructure = FALSE, - int.ov.free = FALSE, - int.lv.free = FALSE, - orthogonal = FALSE, - std.lv = FALSE, - conditional.x = FALSE, - fixed.x = TRUE, - parameterization = "delta", - auto.fix.first = FALSE, - auto.fix.single = FALSE, - auto.var = FALSE, - auto.cov.lv.x = FALSE, - auto.cov.y = FALSE, - auto.th = FALSE, - auto.delta = FALSE, - varTable = NULL, - group.equal = NULL, - group.w.free = FALSE, - ngroups = 1L) { - - categorical <- FALSE - - ### DEFAULT elements: parameters that are typically not specified by - ### users, but should typically be considered, - ### either free or fixed - - # extract `names' of various types of variables: - lv.names <- lav_partable_vnames(FLAT, type="lv") # latent variables - #lv.names.r <- lav_partable_vnames(FLAT, type="lv.regular") # regular latent variables - lv.names.f <- lav_partable_vnames(FLAT, type="lv.formative") # formative latent variables - ov.names <- lav_partable_vnames(FLAT, type="ov") # observed variables - ov.names.x <- lav_partable_vnames(FLAT, type="ov.x") # exogenous x covariates - ov.names.nox <- lav_partable_vnames(FLAT, type="ov.nox") - lv.names.x <- lav_partable_vnames(FLAT, type="lv.x") # exogenous lv - ov.names.y <- lav_partable_vnames(FLAT, type="ov.y") # dependent ov - lv.names.y <- lav_partable_vnames(FLAT, type="lv.y") # dependent lv - #lvov.names.y <- c(ov.names.y, lv.names.y) - lvov.names.y <- c(lv.names.y, ov.names.y) - - - # get 'ordered' variables, either from FLAT or varTable - ov.names.ord1 <- lav_partable_vnames(FLAT, type="ov.ord") - # check if we have "|" for exogenous variables - if(length(ov.names.ord1) > 0L) { - idx <- which(ov.names.ord1 %in% ov.names.x) - if(length(idx) > 0L) { - warning("lavaan WARNING: thresholds are defined for exogenous variables: ", paste(ov.names.ord1[idx], collapse=" ")) - } - } - - if(!is.null(varTable)) { - ov.names.ord2 <- as.character(varTable$name[ varTable$type == "ordered" ]) - # remove fixed.x variables - idx <- which(ov.names.ord2 %in% ov.names.x) - if(length(idx) > 0L) { - ov.names.ord2 <- ov.names.ord2[-idx] - } - - # remove those that do appear in the model syntax - idx <- which(!ov.names.ord2 %in% ov.names) - if(length(idx) > 0L) { - ov.names.ord2 <- ov.names.ord2[-idx] - } - } else { - ov.names.ord2 <- character(0) - } - #### FIXME!!!!! ORDER! - ov.names.ord <- unique(c(ov.names.ord1, ov.names.ord2)) - - # if we have the "|" in the model syntax, check the number of thresholds - if(!is.null(varTable) && length(ov.names.ord1) > 0L) { - for(o in ov.names.ord1) { - nth <- varTable$nlev[ varTable$name == o ] - 1L - nth.in.partable <- sum(FLAT$op == "|" & FLAT$lhs == o) - if(nth != nth.in.partable) { - stop("lavaan ERROR: expected ", nth, - " threshold(s) for variable ", - sQuote(o), "; syntax contains ", nth.in.partable, "\n") - } - } - } - - if(length(ov.names.ord) > 0L) - categorical <- TRUE - - lhs <- rhs <- character(0) - - # 1. 
THRESHOLDS (based on varTable) - # NOTE: - new in 0.5-18: ALWAYS include threshold parameters in partable, - # but only free them if auto.th = TRUE - # - only ov.names.ord2, because ov.names.ord1 are already in USER - # and we only need to add 'default' parameters here - nth <- 0L - #if(auto.th && length(ov.names.ord2) > 0L) { - if(length(ov.names.ord2) > 0L) { - for(o in ov.names.ord2) { - nth <- varTable$nlev[ varTable$name == o ] - 1L - if(nth < 1L) next - lhs <- c(lhs, rep(o, nth)) - rhs <- c(rhs, paste("t", seq_len(nth), sep="")) - } - nth <- length(lhs) - } - - # 2. default (residual) variances and covariances - - # a) (residual) VARIANCES (all ov's except exo, and all lv's) - # NOTE: change since 0.5-17: we ALWAYS include the vars in the - # parameter table; but only if auto.var = TRUE, we set them free - #if(auto.var) { - ov.var <- ov.names.nox - # auto-remove ordinal variables - #idx <- match(ov.names.ord, ov.var) - #if(length(idx)) ov.var <- ov.var[-idx] - lhs <- c(lhs, ov.var, lv.names) - rhs <- c(rhs, ov.var, lv.names) - #} - - # b) `independent` latent variable COVARIANCES (lv.names.x) - if(auto.cov.lv.x && length(lv.names.x) > 1L) { - tmp <- utils::combn(lv.names.x, 2) - lhs <- c(lhs, tmp[1,]) # to fill upper.tri - rhs <- c(rhs, tmp[2,]) - } - - # c) `dependent` latent variables COVARIANCES (lv.y.idx + ov.y.lv.idx) - if(auto.cov.y && length(lvov.names.y) > 1L) { - tmp <- utils::combn(lvov.names.y, 2L) - lhs <- c(lhs, tmp[1,]) # to fill upper.tri - rhs <- c(rhs, tmp[2,]) - } - - # d) exogenous x covariates: VARIANCES + COVARIANCES - if(!conditional.x && (nx <- length(ov.names.x)) > 0L) { - idx <- lower.tri(matrix(0, nx, nx), diag=TRUE) - lhs <- c(lhs, rep(ov.names.x, each=nx)[idx]) # fill upper.tri - rhs <- c(rhs, rep(ov.names.x, times=nx)[idx]) - } - - # create 'op' (thresholds come first, then variances) - op <- rep("~~", length(lhs)); op[seq_len(nth)] <- "|" - - # LATENT RESPONSE SCALES (DELTA) - # NOTE: - new in 0.5-19: ALWAYS include scaling parameters in partable, - # but only free them if auto.delta = TRUE (and parameterization - # is "delta" - #if(auto.delta && auto.th && length(ov.names.ord) > 0L && - # # length(lv.names) > 0L && - # (ngroups > 1L || any(FLAT$op == "~*~") || parameterization == "theta")) { - if(length(ov.names.ord) > 0L) { - lhs <- c(lhs, ov.names.ord) - rhs <- c(rhs, ov.names.ord) - op <- c(op, rep("~*~", length(ov.names.ord))) - } - - # 3. INTERCEPTS - if(meanstructure) { - if(conditional.x) { - ov.int <- ov.names.nox - } else { - ov.int <- ov.names - } - # auto-remove ordinal variables - #idx <- which(ov.int %in% ov.names.ord) - #if(length(idx)) ov.int <- ov.int[-idx] - - int.lhs <- c(ov.int, lv.names) - lhs <- c(lhs, int.lhs) - rhs <- c(rhs, rep("", length(int.lhs))) - op <- c(op, rep("~1", length(int.lhs))) - } - - # free group weights - if(group.w.free) { - lhs <- c(lhs, "group") - rhs <- c(rhs, "w") - op <- c(op, "%") - } - - DEFAULT <- data.frame(lhs=lhs, op=op, rhs=rhs, - mod.idx=rep(0L, length(lhs)), - stringsAsFactors=FALSE) - - - # 4. USER: user-specified elements - lhs <- FLAT$lhs - op <- FLAT$op - rhs <- FLAT$rhs - mod.idx <- FLAT$mod.idx - - lv.names <- lav_partable_vnames(FLAT, type="lv") # latent variables - ov.names <- lav_partable_vnames(FLAT, type="ov") # observed variables - - # check order of covariances: we only fill the upper.tri! - cov.idx <- which(op == "~~" & lhs != rhs) - for(i in cov.idx) { - lv.ov.names <- c(lv.names, ov.names) ### FIXME!!! OK?? 
- lv.idx <- match(c(lhs[i], rhs[i]), lv.ov.names) - if(lv.idx[1] > lv.idx[2]) { # swap! - tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp - } - if(lhs[i] %in% lv.names && rhs[i] %in% lv.names) { - lv.idx <- match(c(lhs[i], rhs[i]), lv.names) - if(lv.idx[1] > lv.idx[2]) { # swap! - tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp - } - } else if(lhs[i] %in% ov.names && rhs[i] %in% ov.names) { - ov.idx <- match(c(lhs[i], rhs[i]), ov.names) - if(ov.idx[1] > ov.idx[2]) { # swap! - tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp - } - } else { # mixed!! # we allow this since 0.4-10 - lv.ov.names <- c(lv.names, ov.names) ### FIXME!!! OK?? - lv.idx <- match(c(lhs[i], rhs[i]), lv.ov.names) - if(lv.idx[1] > lv.idx[2]) { # swap! - tmp <- lhs[i]; lhs[i] <- rhs[i]; rhs[i] <- tmp - } - } - } - - USER <- data.frame(lhs=lhs, op=op, rhs=rhs, mod.idx=mod.idx, - stringsAsFactors=FALSE) - - # check for duplicated elements in USER - TMP <- USER[,1:3] - idx <- which(duplicated(TMP)) - if(length(idx) > 0L) { - txt <- sapply(1:length(idx), function(i) { - paste(" ", TMP[idx[i],"lhs"], - TMP[idx[i], "op"], - TMP[idx[i],"rhs"]) }) - warning("duplicated elements in model syntax have been ignored:\n", - paste(txt, collapse = "\n")) - USER <- USER[-idx,] - } - - # check for duplicated elements in DEFAULT - # - FIXME: can we not avoid this somehow?? - # - for example, if the user model includes 'x1 ~~ x1' - # or 'x1 ~ 1' - # - remove them from DEFAULT - TMP <- rbind(DEFAULT[,1:3], USER[,1:3]) - idx <- which(duplicated(TMP, fromLast=TRUE)) # idx should be in DEFAULT - if(length(idx)) { - for(i in idx) { - flat.idx <- which(USER$lhs == DEFAULT$lhs[i] & - USER$op == DEFAULT$op[i] & - USER$rhs == DEFAULT$rhs[i]) - if(length(flat.idx) != 1L) { - cat("[lavaan DEBUG] idx in TMP: i = ", i, "\n"); print(TMP[i,]) - cat("[lavaan DEBUG] idx in DEFAULT: i = ", i, "\n"); print(DEFAULT[i,]) - cat("[lavaan DEBUG] flat.idx:"); print(flat.idx) - } - } - DEFAULT <- DEFAULT[-idx,] - } - - # now that we have removed all duplicated elements, we can construct - # the LIST for a single group/block - lhs <- c(USER$lhs, DEFAULT$lhs) - op <- c(USER$op, DEFAULT$op) - rhs <- c(USER$rhs, DEFAULT$rhs) - user <- c(rep(1L, length(USER$lhs)), - rep(0L, length(DEFAULT$lhs))) - mod.idx <- c(USER$mod.idx, DEFAULT$mod.idx) - free <- rep(1L, length(lhs)) - ustart <- rep(as.numeric(NA), length(lhs)) - #label <- paste(lhs, op, rhs, sep="") - label <- rep(character(1), length(lhs)) - exo <- rep(0L, length(lhs)) - - # 0a. if auto.th = FALSE, set fix the thresholds - if(!auto.th) { - th.idx <- which(op == "|" & user == 0L) - free[th.idx] <- 0L - } - - # 0b. if auto.var = FALSE, set the unspecified variances to zero - if(!auto.var) { - var.idx <- which(op == "~~" & - lhs == rhs & - user == 0L) - ustart[var.idx] <- 0.0 - free[var.idx] <- 0L - } else { - # 'formative' (residual) variances are set to zero by default - var.idx <- which(op == "~~" & - lhs == rhs & - lhs %in% lv.names.f & - user == 0L) - ustart[var.idx] <- 0.0 - free[var.idx] <- 0L - } - - - # 1. fix metric of regular latent variables - if(std.lv) { - # fix metric by fixing the variance of the latent variable - lv.var.idx <- which(op == "~~" & - lhs %in% lv.names & lhs == rhs) - ustart[lv.var.idx] <- 1.0 - free[lv.var.idx] <- 0L - } - if(auto.fix.first) { - # fix metric by fixing the loading of the first indicator - mm.idx <- which(op == "=~") - first.idx <- mm.idx[which(!duplicated(lhs[mm.idx]))] - ustart[first.idx] <- 1.0 - free[first.idx] <- 0L - } - - # 2. 
fix residual variance of single indicators to zero - if(auto.var && auto.fix.single) { - mm.idx <- which(op == "=~") - T <- table(lhs[mm.idx]) - if(any(T == 1L)) { - # ok, we have a LV with only a single indicator - lv.names.single <- names(T)[T == 1L] - # get corresponding indicator if unique - lhs.mm <- lhs[mm.idx]; rhs.mm <- rhs[mm.idx] - single.ind <- rhs.mm[which(lhs.mm %in% lv.names.single & - !(duplicated(rhs.mm) | - duplicated(rhs.mm, fromLast=TRUE)))] - # is the indicator unique? - if(length(single.ind)) { - var.idx <- which(op == "~~" & lhs %in% single.ind - & rhs %in% single.ind - & lhs == rhs - & user == 0L) - ustart[var.idx] <- 0.0 - free[var.idx] <- 0L - } - } - } - - # 3. orthogonal=TRUE? - if(orthogonal) { - # FIXME: only lv.x.idx for now - lv.cov.idx <- which(op == "~~" & - lhs %in% lv.names & - lhs != rhs & - user == 0L) - ustart[lv.cov.idx] <- 0.0 - free[lv.cov.idx] <- 0L - } - - # 4. intercepts - if(meanstructure) { - if(categorical) { - # zero intercepts/means ordinal variables - ov.int.idx <- which(op == "~1" & - lhs %in% ov.names.ord & - user == 0L) - ustart[ov.int.idx] <- 0.0 - free[ov.int.idx] <- 0L - } - if(int.ov.free == FALSE) { - # zero intercepts/means observed variables - ov.int.idx <- which(op == "~1" & - lhs %in% ov.names & - user == 0L) - ustart[ov.int.idx] <- 0.0 - free[ov.int.idx] <- 0L - } - if(int.lv.free == FALSE) { - # zero intercepts/means latent variables - lv.int.idx <- which(op == "~1" & - lhs %in% lv.names & - user == 0L) - ustart[lv.int.idx] <- 0.0 - free[lv.int.idx] <- 0L - } - } - - # 5. handle exogenous `x' covariates - if(length(ov.names.x) > 0) { - - # 1. variances/covariances - exo.var.idx <- which(op == "~~" & - rhs %in% ov.names.x & - user == 0L) - if(fixed.x) { - ustart[exo.var.idx] <- as.numeric(NA) # should be overriden later! - free[exo.var.idx] <- 0L - exo[exo.var.idx] <- 1L - } else if(conditional.x) { - exo[exo.var.idx] <- 1L - } - - # 2. intercepts - exo.int.idx <- which(op == "~1" & - lhs %in% ov.names.x & - user == 0L) - if(fixed.x) { - ustart[exo.int.idx] <- as.numeric(NA) # should be overriden later! - free[exo.int.idx] <- 0L - exo[exo.int.idx] <- 1L - } else if(conditional.x) { - exo[exo.int.idx] <- 1L - } - - # 3. regressions ov + lv - exo.reg.idx <- which(op == "~" & - lhs %in% c(lv.names, ov.names.nox) & - rhs %in% ov.names.x) - if(conditional.x) { - exo[exo.reg.idx] <- 1L - } - } - - # 5b. residual variances of ordinal variables? - if(length(ov.names.ord) > 0L) { - ord.idx <- which(lhs %in% ov.names.ord & - op == "~~" & - lhs == rhs) - ustart[ord.idx] <- 1L ## FIXME!! or 0?? (0 breaks ex3.12) - free[ord.idx] <- 0L - } - - # 5c latent response scales of ordinal variables? - # by default, all fixed to 1.0 - if(length(ov.names.ord) > 0L) { - delta.idx <- which(op == "~*~") - ustart[delta.idx] <- 1.0 - free[delta.idx] <- 0L - } - - # group proportions (group 1L) - if(group.w.free) { - group.idx <- which(lhs == "group" & op == "%") - #if(ngroups > 1L) { - free[ group.idx ] <- 1L - ustart[ group.idx ] <- as.numeric(NA) - #} else { - # free[ group.idx ] <- 0L - # ustart[ group.idx ] <- 0.0 # last group - #} - } - - # 6. multiple groups? 
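# NOTE: besides plain stacking, groups 2..G get a few targeted changes
# below. E.g., when intercepts (or thresholds) are constrained equal
# across groups, the latent means must be freed: per group,
# E(y) = nu + lambda * alpha, so with nu fixed equal, group differences
# in observed means can only be absorbed by alpha. A toy check with
# assumed values:
#   lambda <- c(1, 0.8); nu <- c(0, 0.3) # equal across groups
#   nu + lambda * 0.0                    # group-1 means (alpha = 0)
#   nu + lambda * 0.5                    # group-2 means (alpha freed)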
- group <- rep(1L, length(lhs))
- if(ngroups > 1) {
- group <- rep(1:ngroups, each=length(lhs))
- user <- rep(user, times=ngroups)
- lhs <- rep(lhs, times=ngroups)
- op <- rep(op, times=ngroups)
- rhs <- rep(rhs, times=ngroups)
- free <- rep(free, times=ngroups)
- ustart <- rep(ustart, times=ngroups)
- mod.idx <- rep(mod.idx, times=ngroups)
- label <- rep(label, times=ngroups)
- exo <- rep(exo, times=ngroups)
-
- # specific changes per group
- for(g in 2:ngroups) {
- # label
- # label[group == g] <- paste(label[group == 1], ".g", g, sep="")
-
- # free/fix intercepts
- if(meanstructure) {
- int.idx <- which(op == "~1" &
- lhs %in% lv.names &
- user == 0L &
- group == g)
- if(int.lv.free == FALSE && g > 1 &&
- ("intercepts" %in% group.equal ||
- "thresholds" %in% group.equal) &&
- !("means" %in% group.equal) ) {
- free[ int.idx ] <- 1L
- ustart[ int.idx ] <- as.numeric(NA)
- }
- }
-
- # latent response scaling
- if(auto.delta && parameterization == "delta") {
- if(any(op == "~*~" & group == g) &&
- ("thresholds" %in% group.equal)) {
- delta.idx <- which(op == "~*~" & group == g)
- free[ delta.idx ] <- 1L
- ustart[ delta.idx ] <- as.numeric(NA)
- }
- } else if(parameterization == "theta") {
- if(any(op == "~*~" & group == g) &&
- ("thresholds" %in% group.equal)) {
- var.ord.idx <- which(op == "~~" & group == g &
- lhs %in% ov.names.ord & lhs == rhs)
- free[ var.ord.idx ] <- 1L
- ustart[ var.ord.idx ] <- as.numeric(NA)
- }
- }
-
- # group proportions
- if(group.w.free) {
- group.idx <- which(lhs == "group" & op == "%" & group == g)
- #if(g == ngroups) {
- # free[ group.idx ] <- 0L
- # ustart[ group.idx ] <- 0.0 # last group
- #} else {
- free[ group.idx ] <- 1L
- ustart[ group.idx ] <- as.numeric(NA)
- #}
- }
- } # g
- } # ngroups
-
- # construct LIST
- LIST <- list( id = seq_along(lhs),
- lhs = lhs,
- op = op,
- rhs = rhs,
- user = user)
-
- # block columns (typically only group)
- for(block in blocks) {
- if(block == "group") {
- LIST[[block]] <- group
- } else {
- LIST[[block]] <- rep(0L, length(lhs))
- }
- }
-
- # other columns
- LIST2 <- list(mod.idx = mod.idx,
- free = free,
- ustart = ustart,
- exo = exo,
- label = label)
-
- LIST <- c(LIST, LIST2)
-}
-
diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_subset.R r-cran-lavaan-0.5.23.1097/R/lav_partable_subset.R
--- r-cran-lavaan-0.5.22/R/lav_partable_subset.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/R/lav_partable_subset.R 2017-02-17 07:29:04.000000000 +0000
@@ -0,0 +1,173 @@
+# YR 11 feb 2017: initial version
+
+# given a parameter table (PT), extract a part of the model:
+# e.g.:
+# - only the measurement model (with saturated latent variables)
+# - only the structural part
+# - a single measurement model (1 factor only)
+# ...
+
+lav_partable_subset_measurement_model <- function(PT = NULL,
+ lavpta = NULL,
+ lv.names = NULL) {
+
+ # PT
+ PT <- as.data.frame(PT, stringsAsFactors = FALSE)
+
+ # lavpta
+ if(is.null(lavpta)) {
+ lavpta <- lav_partable_attributes(PT)
+ }
+
+ # ngroups
+ ngroups <- lavpta$ngroups
+
+ # lv.names: list with element per group
+ if(is.null(lv.names)) {
+ lv.names <- lavpta$vnames$lv.regular
+ } else if(!is.list(lv.names)) {
+ lv.names <- list(lv.names)
+ }
+
+ # which latent variables should we remove?
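# NOTE: the complement computed below is a per-group setdiff(). A
# minimal sketch, with assumed names:
#   all.lv  <- c("visual", "textual", "speed") # lv.regular for group g
#   keep.lv <- "visual"                        # requested lv.names
#   all.lv[ !all.lv %in% keep.lv ]             # -> "textual" "speed"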
+ lv.names.rm <- lapply(1:ngroups, function(g) {
+ lavpta$vnames$lv.regular[[g]][
+ !lavpta$vnames$lv.regular[[g]] %in% lv.names[[g]] ]
+ })
+
+ # indices of rows to remove
+ rm.idx <- integer(0L)
+
+ # remove not-needed measurement models
+ for(g in 1:ngroups) {
+ # indicators for not-needed latent variables
+ IND.idx <- which( PT$op == "=~" &
+ !PT$lhs %in% lv.names[[g]] &
+ PT$group == g )
+ IND <- PT$rhs[ IND.idx ]
+
+ # remove =~
+ rm.idx <- c(rm.idx, IND.idx)
+
+ # remove ~~
+ VAR.idx <- which( PT$op == "~~" &
+ ( PT$lhs %in% IND |
+ PT$rhs %in% IND |
+ PT$lhs %in% lv.names.rm[[g]] |
+ PT$rhs %in% lv.names.rm[[g]] ) &
+ PT$group == g )
+ rm.idx <- c(rm.idx, VAR.idx)
+
+ # regressions, involving a latent variable
+ LV.EQS.idx <- which( PT$op == "~" &
+ ( PT$lhs %in% lavpta$vnames$lv.regular[[g]] |
+ PT$rhs %in% lavpta$vnames$lv.regular[[g]] ) &
+ PT$group == g )
+ rm.idx <- c(rm.idx, LV.EQS.idx)
+
+ # regressions, involving indicators
+ OV.EQS.idx <- which( PT$op == "~" &
+ ( PT$lhs %in% IND |
+ PT$rhs %in% IND ) &
+ PT$group == g )
+ rm.idx <- c(rm.idx, OV.EQS.idx)
+
+ # intercepts indicators
+ OV.INT.idx <- which( PT$op == "~1" &
+ PT$lhs %in% IND &
+ PT$group == g )
+ rm.idx <- c(rm.idx, OV.INT.idx)
+
+ # intercepts latent variables
+ LV.INT.idx <- which( PT$op == "~1" &
+ PT$lhs %in% lv.names.rm[[g]] &
+ PT$group == g )
+ rm.idx <- c(rm.idx, LV.INT.idx)
+
+ # thresholds
+ TH.idx <- which( PT$op == "|" &
+ PT$lhs %in% IND &
+ PT$group == g )
+ rm.idx <- c(rm.idx, TH.idx)
+
+ # scaling factors
+ SC.idx <- which( PT$op == "~*~" &
+ PT$lhs %in% IND &
+ PT$group == g )
+ rm.idx <- c(rm.idx, SC.idx)
+
+ # FIXME: handle ==, :=, <, > constraints involving IND...
+ }
+
+ if(length(rm.idx) > 0L) {
+ PT <- PT[-rm.idx,,drop = FALSE]
+ }
+
+ # clean up
+ PT <- lav_partable_complete(PT)
+
+ # check if we have enough indicators?
+ # TODO
+
+ PT
+}
+
+lav_partable_subset_structural_model <- function(PT = NULL,
+ lavpta = NULL) {
+
+ # PT
+ PT <- as.data.frame(PT, stringsAsFactors = FALSE)
+
+ # lavpta
+ if(is.null(lavpta)) {
+ lavpta <- lav_partable_attributes(PT)
+ }
+
+ # ngroups
+ ngroups <- lavpta$ngroups
+
+ # eqs.names
+ eqs.x.names <- lavpta$vnames$eqs.x
+ eqs.y.names <- lavpta$vnames$eqs.y
+
+ # indices of rows to keep
+ keep.idx <- integer(0L)
+
+ # collect the structural parameters, per group
+ for(g in 1:ngroups) {
+
+ # eqs.names
+ eqs.names <- unique( c(lavpta$vnames$eqs.x[[g]],
+ lavpta$vnames$eqs.y[[g]]) )
+
+ # regressions
+ reg.idx <- which(PT$op == "~" & PT$group == g &
+ PT$lhs %in% eqs.names &
+ PT$rhs %in% eqs.names)
+
+ # the variances
+ var.idx <- which(PT$op == "~~" & PT$group == g &
+ PT$lhs %in% eqs.names &
+ PT$rhs %in% eqs.names &
+ PT$lhs == PT$rhs)
+
+ # optionally covariances (exo!)
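# NOTE: only parameters among the eqs.* variables survive: regressions,
# variances, and (below) their covariances; measurement parameters are
# simply never added to keep.idx. A sketch of the off-diagonal
# selection, with an assumed two-variable block:
#   lhs <- c("x1", "x1"); rhs <- c("x1", "x2"); op <- c("~~", "~~")
#   which(op == "~~" & lhs != rhs)  # -> 2L, the x1 ~~ x2 covariance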
+ cov.idx <- which(PT$op == "~~" & PT$group == g &
+ PT$lhs %in% eqs.names &
+ PT$rhs %in% eqs.names &
+ PT$lhs != PT$rhs)
+
+ # means/intercepts
+ int.idx <- which(PT$op == "~1" & PT$group == g &
+ PT$lhs %in% eqs.names)
+
+ keep.idx <- c(keep.idx, reg.idx, var.idx, cov.idx, int.idx)
+ }
+
+ PT <- PT[keep.idx, , drop = FALSE]
+
+ # clean up
+ PT <- lav_partable_complete(PT)
+
+ PT
+}
diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_unrestricted.R r-cran-lavaan-0.5.23.1097/R/lav_partable_unrestricted.R
--- r-cran-lavaan-0.5.22/R/lav_partable_unrestricted.R 2016-02-07 16:43:47.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/R/lav_partable_unrestricted.R 2017-02-21 09:00:34.000000000 +0000
@@ -256,6 +256,7 @@
 op = op,
 rhs = rhs,
 user = rep(1L, length(lhs)),
+ block = group, # for now
 group = group,
 #mod.idx = rep(0L, length(lhs)),
 free = free,
diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_utils.R r-cran-lavaan-0.5.23.1097/R/lav_partable_utils.R
--- r-cran-lavaan-0.5.22/R/lav_partable_utils.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/R/lav_partable_utils.R 2017-02-22 09:44:53.000000000 +0000
@@ -0,0 +1,165 @@
+# guess number of blocks from a partable
+lav_partable_nblocks <- function(partable) {
+
+ if(is.null(partable$block)) {
+ nblocks <- 1L
+ } else {
+ # always integers
+ tmp <- partable$block[ partable$block > 0L ] # non-zero only
+ nblocks <- length(unique(na.omit(tmp))) # all combinations
+ }
+
+ nblocks
+}
+
+# what are the block values (not necessarily 1..nb)
+lav_partable_block_values <- function(partable) {
+
+ if(is.null(partable$block)) {
+ block.values <- 1L
+ } else {
+ # always integers
+ tmp <- partable$block[ partable$block > 0L ] # non-zero only
+ block.values <- unique(na.omit(tmp)) # could be, e.g., '2' only
+ }
+
+ block.values
+}
+
+# guess number of groups from a partable
+lav_partable_ngroups <- function(partable) {
+
+ if(is.null(partable$group)) {
+ ngroups <- 1L
+ } else if(is.numeric(partable$group)) {
+ tmp <- partable$group[ partable$group > 0L ]
+ ngroups <- length(unique(na.omit(tmp)))
+ } else { # character
+ tmp <- partable$group[nchar(partable$group) > 0L]
+ ngroups <- length(unique(na.omit(tmp)))
+ }
+
+ ngroups
+}
+
+# guess number of levels from a partable
+lav_partable_nlevels <- function(partable) {
+
+ if(is.null(partable$level)) {
+ nlevels. <- 1L
+ } else if(is.numeric(partable$level)) {
+ tmp <- partable$level[ partable$level > 0L ]
+ nlevels. <- length(unique(na.omit(tmp)))
+ } else { # character
+ tmp <- partable$level[nchar(partable$level) > 0L]
+ nlevels. <- length(unique(na.omit(tmp)))
+ }
+
+ nlevels.
+}
+
+# number of sample statistics per block
+lav_partable_ndat <- function(partable) {
+
+ # global
+ meanstructure <- any(partable$op == "~1")
+ fixed.x <- any(partable$exo > 0L & partable$free == 0L)
+ conditional.x <- any(partable$exo > 0L & partable$op == "~")
+ categorical <- any(partable$op == "|")
+ if(categorical) {
+ meanstructure <- TRUE
+ }
+
+ # blocks
+ nblocks <- lav_partable_nblocks(partable)
+ ndat <- integer(nblocks)
+
+ for(b in seq_len(nblocks)) {
+
+ # how many observed variables in this block?
+ if(conditional.x) {
+ ov.names <- lav_partable_vnames(partable, "ov.nox", block = b)
+ } else {
+ ov.names <- lav_partable_vnames(partable, "ov", block = b)
+ }
+ nvar <- length(ov.names)
+
+ # pstar
+ pstar <- nvar*(nvar+1)/2
+ if(meanstructure) {
+ pstar <- pstar + nvar
+ }
+
+ ndat[b] <- pstar
+
+ # correction for fixed.x?
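# NOTE: a worked example of the correction below, with assumed sizes:
# nvar = 5 observed variables, meanstructure, and nvar.x = 2 fixed
# exogenous covariates:
#   pstar   <- 5 * 6 / 2 + 5  # 20 sample statistics in total
#   pstar.x <- 2 * 3 / 2 + 2  #  5 involve only the fixed x block
#   pstar - pstar.x           # -> 15 statistics left to model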
+ if(!conditional.x && fixed.x) { + ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) + nvar.x <- length(ov.names.x) + pstar.x <- nvar.x * (nvar.x + 1) / 2 + if(meanstructure) { + pstar.x <- pstar.x + nvar.x + } + ndat[b] <- ndat[b] - pstar.x + } + + # correction for ordinal data? + if(categorical) { + ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) + nexo <- length(ov.names.x) + ov.ord <- lav_partable_vnames(partable, "ov.ord", block = b) + nvar.ord <- length(ov.ord) + th <- lav_partable_vnames(partable, "th", block = b) + nth <- length(th) + # no variances + ndat[b] <- ndat[b] - nvar.ord + # no means + ndat[b] <- ndat[b] - nvar.ord + # but additional thresholds + ndat[b] <- ndat[b] + nth + # add slopes + ndat[b] <- ndat[b] + (nvar * nexo) + } + + # correction for conditional.x not categorical + if(conditional.x && !categorical) { + ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) + nexo <- length(ov.names.x) + # add slopes + ndat[b] <- ndat[b] + (nvar * nexo) + } + + # correction for group proportions? + group.idx <- which(partable$lhs == "group" & + partable$op == "%" & + partable$block == b) + if(length(group.idx) > 0L) { + # ndat <- ndat + (length(group.idx) - 1L) # G - 1 (sum to one) + ndat[b] <- ndat[b] + 1L # poisson: each cell a parameter + } + } # blocks + + # sum over all blocks + sum(ndat) +} + +# total number of free parameters +lav_partable_npar <- function(partable) { + + # we only assume non-zero values + npar <- length( which(partable$free > 0L) ) + npar +} + +# global degrees of freedom: ndat - npar +lav_partable_df <- function(partable) { + + npar <- lav_partable_npar(partable) + ndat <- lav_partable_ndat(partable) + + # degrees of freedom + df <- ndat - npar + + as.integer(df) +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_partable_vnames.R r-cran-lavaan-0.5.23.1097/R/lav_partable_vnames.R --- r-cran-lavaan-0.5.22/R/lav_partable_vnames.R 2016-04-26 18:19:38.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_partable_vnames.R 2017-02-22 09:45:32.000000000 +0000 @@ -1,37 +1,47 @@ -# lav_partable_names +# lav_partable_names # -# YR. 29 june 2013 +# YR. 29 june 2013 # - as separate file; used to be in utils-user.R # - lav_partable_names (aka 'vnames') allows multiple options in 'type' # returning them all as a list (or just a vector if only 1 type is needed) # public version -lavNames <- function(object, type = "ov", group = NULL) { +lavNames <- function(object, type = "ov", ...) { - if(inherits(object, "lavaan")) { + if(inherits(object, "lavaan") || inherits(object, "lavaanList")) { partable <- object@ParTable } else if(class(object) == "list" || inherits(object, "data.frame")) { partable <- object + } else if(class(object) == "character") { + # just a model string? + partable <- lavParseModelString(object) } - lav_partable_vnames(partable, type = type, group = group) + lav_partable_vnames(partable, type = type, ...) 
} # alias for backwards compatibility lavaanNames <- lavNames # return variable names in a partable -# - the 'type' argument determines the status of the variable: observed, +# - the 'type' argument determines the status of the variable: observed, # latent, endo/exo/...; default = "ov", but most used is type = "all" # - the 'group' argument either selects a single group (if group is an integer) # or returns a list per group -lav_partable_vnames <- function(partable, type = NULL, group = NULL, +# - the 'level' argument either selects a single level (if level is an integer) +# or returns a list per level +# - the 'block' argument either selects a single block (if block is an integer) +# or returns a list per block +lav_partable_vnames <- function(partable, type = NULL, ..., warn = FALSE, ov.x.fatal = FALSE) { # check for empy table if(length(partable$lhs) == 0) return(character(0L)) + # dotdotdot + dotdotdot <- list(...) + type.list <- c("ov", # observed variables (ov) "ov.x", # (pure) exogenous observed variables "ov.nox", # non-exogenous observed variables @@ -53,91 +63,110 @@ "lv.nox", # non-exogenous latent variables "lv.nonnormal",# latent variables with non-normal indicators "lv.interaction", # interaction terms - + "eqs.y", # y's in regression "eqs.x" # x's in regression ) # sanity check - stopifnot(is.list(partable), - !missing(type), + stopifnot(is.list(partable), + !missing(type), type %in% c(type.list, "all")) if(length(type) == 1L && type == "all") { type <- type.list } - # if `group' is missing in partable, just add group=1L - if(is.null(partable$group)) { - partable$group <- rep(1L, length(partable$lhs)) - ngroups <- 1L - } else { - if(is.character(partable$group)) { - group.label <- unique(partable$group) - group.label <- group.label[ nchar(group.label) > 0L ] - ngroups <- length(group.label) - } else { - ngroups <- max(partable$group) + # ALWAYS need `block' column -- create one if missing + if(is.null(partable$block)) { + partable$block <- rep(1L, length(partable$lhs)) + } + + # nblocks -- block column is integer only + nblocks <- lav_partable_nblocks(partable) + + # per default, use full partable + block.select <- lav_partable_block_values(partable) + + # check for ... selection argument(s) + ndotdotdot <- length(dotdotdot) + if(ndotdotdot > 0L) { + dot.names <- names(dotdotdot) + block.select <- rep(TRUE, length(partable$lhs)) + for(dot in seq_len(ndotdotdot)) { + + # selection variable? + block.var <- dot.names[dot] + block.val <- dotdotdot[[block.var]] + # do we have this 'block.var' in partable? 
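# NOTE: the ... mechanism lets callers select rows by any block-defining
# column, e.g. lav_partable_vnames(partable, "ov", group = 2) or, in a
# (hypothetical) multilevel table, group = 1 plus level = 2; each
# name/value pair narrows block.select further. A sketch:
#   block.select <- rep(TRUE, length(partable$lhs))
#   block.select <- block.select & partable$group %in% 2L
#   block.select <- block.select & partable$level %in% 1L
#   unique(partable$block[block.select])  # block id(s) to keep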
+ if(is.null(partable[[block.var]])) {
+ stop("lavaan ERROR: selection variable `", block.var, "' not found in the parameter table.")
+ } else {
+ if(!all(block.val %in% partable[[block.var]])) {
+ stop("lavaan ERROR: ", block.var ,
+ " column does not contain value `", block.val, "'")
+ }
+ block.select <- ( block.select &
+ partable[[block.var]] %in% block.val )
+ }
+ } # dot
+ block.select <- unique(partable$block[block.select])
+
+ if(length(block.select) == 0L) {
+ warning("lavaan WARNING: no blocks selected.")
+ }
 }
-
- # handle group argument
- group.orig <- group
- if(is.numeric(group)) {
- group <- as.integer(group)
- stopifnot(all(group %in% partable$group))
- } else if(is.null(group) || group == "list") {
- group <- seq_len(ngroups)
- }
- # output: list per group
- OUT <- vector("list", length=ngroups)
- OUT$ov <- vector("list", length=ngroups)
- OUT$ov.x <- vector("list", length=ngroups)
- OUT$ov.nox <- vector("list", length=ngroups)
- OUT$ov.model <- vector("list", length=ngroups)
- OUT$ov.y <- vector("list", length=ngroups)
- OUT$ov.num <- vector("list", length=ngroups)
- OUT$ov.ord <- vector("list", length=ngroups)
- OUT$ov.ind <- vector("list", length=ngroups)
- OUT$ov.orphan <- vector("list", length=ngroups)
- OUT$ov.interaction <- vector("list", length=ngroups)
- OUT$th <- vector("list", length=ngroups)
- OUT$th.mean <- vector("list", length=ngroups)
-
- OUT$lv <- vector("list", length=ngroups)
- OUT$lv.regular <- vector("list", length=ngroups)
- OUT$lv.formative <- vector("list", length=ngroups)
- OUT$lv.x <- vector("list", length=ngroups)
- OUT$lv.y <- vector("list", length=ngroups)
- OUT$lv.nox <- vector("list", length=ngroups)
- OUT$lv.nonnormal <- vector("list", length=ngroups)
- OUT$lv.interaction <- vector("list", length=ngroups)
- OUT$eqs.y <- vector("list", length=ngroups)
- OUT$eqs.x <- vector("list", length=ngroups)
+ # output: list per block
+ OUT <- vector("list", length = nblocks)
+ OUT$ov <- vector("list", length = nblocks)
+ OUT$ov.x <- vector("list", length = nblocks)
+ OUT$ov.nox <- vector("list", length = nblocks)
+ OUT$ov.model <- vector("list", length = nblocks)
+ OUT$ov.y <- vector("list", length = nblocks)
+ OUT$ov.num <- vector("list", length = nblocks)
+ OUT$ov.ord <- vector("list", length = nblocks)
+ OUT$ov.ind <- vector("list", length = nblocks)
+ OUT$ov.orphan <- vector("list", length = nblocks)
+ OUT$ov.interaction <- vector("list", length = nblocks)
+ OUT$th <- vector("list", length = nblocks)
+ OUT$th.mean <- vector("list", length = nblocks)
+
+ OUT$lv <- vector("list", length = nblocks)
+ OUT$lv.regular <- vector("list", length = nblocks)
+ OUT$lv.formative <- vector("list", length = nblocks)
+ OUT$lv.x <- vector("list", length = nblocks)
+ OUT$lv.y <- vector("list", length = nblocks)
+ OUT$lv.nox <- vector("list", length = nblocks)
+ OUT$lv.nonnormal <- vector("list", length = nblocks)
+ OUT$lv.interaction <- vector("list", length = nblocks)
 
- for(g in group) {
+ OUT$eqs.y <- vector("list", length = nblocks)
+ OUT$eqs.x <- vector("list", length = nblocks)
+
+ for(b in block.select) {
 
 # always compute lv.names
- lv.names <- unique( partable$lhs[ partable$group == g &
+ lv.names <- unique( partable$lhs[ partable$block == b &
 (partable$op == "=~" |
 partable$op == "<~") ] )
 
 # determine lv interactions
- int.names <- unique(partable$rhs[ partable$group == g &
+ int.names <- unique(partable$rhs[ partable$block == b &
 grepl(":", partable$rhs) ] )
 n.int <- length(int.names)
 if(n.int > 0L) {
 ok.idx <- logical(n.int)
 for(iv in seq_len(n.int)) {
 NAMES
<- strsplit(int.names[iv], ":", fixed = TRUE)[[1L]] - + # three scenario's: # - both variables are latent (ok) # - both variables are observed (ignore) # - only one latent (warn??) -> upgrade observed to latent - # thus if at least one is in lv.names, we treat it as a + # thus if at least one is in lv.names, we treat it as a # latent interaction if(sum(NAMES %in% lv.names) > 0L) { ok.idx[iv] <- TRUE @@ -151,62 +180,62 @@ # store lv if("lv" %in% type) { - OUT$lv[[g]] <- lv.names - } + OUT$lv[[b]] <- lv.names + } # regular latent variables ONLY (ie defined by =~ only) if("lv.regular" %in% type) { - out <- unique( partable$lhs[ partable$group == g & + out <- unique( partable$lhs[ partable$block == b & partable$op == "=~" ] ) - OUT$lv.regular[[g]] <- out + OUT$lv.regular[[b]] <- out } # interaction terms involving latent variables (only) if("lv.interaction" %in% type) { - OUT$lv.interaction[[g]] <- lv.interaction + OUT$lv.interaction[[b]] <- lv.interaction } # formative latent variables ONLY (ie defined by <~ only) if("lv.formative" %in% type) { - out <- unique( partable$lhs[ partable$group == g & + out <- unique( partable$lhs[ partable$block == b & partable$op == "<~" ] ) - OUT$lv.formative[[g]] <- out + OUT$lv.formative[[b]] <- out } # eqs.y - if(!(length(type) == 1L && + if(!(length(type) == 1L && type %in% c("lv", "lv.regular", "lv.nonnormal"))) { - eqs.y <- unique( partable$lhs[ partable$group == g & + eqs.y <- unique( partable$lhs[ partable$block == b & partable$op == "~" ] ) } # store eqs.y if("eqs.y" %in% type) { - OUT$eqs.y[[g]] <- eqs.y + OUT$eqs.y[[b]] <- eqs.y } - + # eqs.x - if(!(length(type) == 1L && + if(!(length(type) == 1L && type %in% c("lv", "lv.regular", "lv.nonnormal","lv.x"))) { - eqs.x <- unique( partable$rhs[ partable$group == g & + eqs.x <- unique( partable$rhs[ partable$block == b & (partable$op == "~" | partable$op == "<~") ] ) } # store eqs.x if("eqs.x" %in% type) { - OUT$eqs.x[[g]] <- eqs.x + OUT$eqs.x[[b]] <- eqs.x } # v.ind -- indicators of latent variables - if(!(length(type) == 1L && + if(!(length(type) == 1L && type %in% c("lv", "lv.regular", "lv.nonnormal"))) { - v.ind <- unique( partable$rhs[ partable$group == g & + v.ind <- unique( partable$rhs[ partable$block == b & partable$op == "=~" ] ) } # ov.* - if(!(length(type) == 1L && + if(!(length(type) == 1L && type %in% c("lv", "lv.regular", "lv.nonnormal", "lv.x","lv.y"))) { # 1. indicators, which are not latent variables themselves ov.ind <- v.ind[ !v.ind %in% lv.names ] @@ -216,7 +245,7 @@ ov.x <- eqs.x[ !eqs.x %in% c(lv.names, ov.ind, ov.y) ] } - # observed variables + # observed variables # easy approach would be: everything that is not in lv.names, # but the main purpose here is to 'order' the observed variables # according to 'type' (indicators, ov.y, ov.x, orphans) @@ -224,15 +253,15 @@ type %in% c("lv", "lv.regular", "lv.nonnormal", "lv.x","lv.y"))) { # 4. orphaned covariances - ov.cov <- c(partable$lhs[ partable$group == g & + ov.cov <- c(partable$lhs[ partable$block == b & partable$op == "~~" & - !partable$lhs %in% lv.names ], - partable$rhs[ partable$group == g & + !partable$lhs %in% lv.names ], + partable$rhs[ partable$block == b & partable$op == "~~" & !partable$rhs %in% lv.names ]) # 5. orphaned intercepts/thresholds - ov.int <- partable$lhs[ partable$group == g & - (partable$op == "~1" | + ov.int <- partable$lhs[ partable$block == b & + (partable$op == "~1" | partable$op == "|") & !partable$lhs %in% lv.names ] @@ -243,11 +272,11 @@ # store ov? 
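# NOTE: as the comment above explains, ov.names is assembled in a fixed
# order rather than as "everything not latent": indicators first, then
# dependent ov's, then exogenous x, then orphans that only appear in
# (co)variances or intercepts. A sketch with assumed names:
#   ov.ind <- c("x1", "x2"); ov.y <- "y1"; ov.x <- "z1"; orphan <- "w1"
#   ov.names <- unique(c(ov.ind, ov.y, ov.x, orphan))
#   # -> "x1" "x2" "y1" "z1" "w1"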
if("ov" %in% type) { - OUT$ov[[g]] <- ov.names + OUT$ov[[b]] <- ov.names } if("ov.ind" %in% type) { - OUT$ov.ind[[g]] <- ov.ind + OUT$ov.ind[[b]] <- ov.ind } if("ov.interaction" %in% type) { @@ -273,7 +302,7 @@ ov.interaction <- character(0L) } - OUT$ov.interaction[[g]] <- ov.interaction + OUT$ov.interaction[[b]] <- ov.interaction } @@ -281,46 +310,46 @@ if(any(type %in% c("ov.x","ov.nox","ov.num", "ov.model", "th.mean","lv.nonnormal"))) { # correction: is any of these ov.names.x mentioned as a variance, - # covariance, or intercept? + # covariance, or intercept? # this should trigger a warning in lavaanify() if(is.null(partable$user)) { # FLAT! partable$user <- rep(1L, length(partable$lhs)) } - vars <- c( partable$lhs[ partable$group == g & - partable$op == "~1" & + vars <- c( partable$lhs[ partable$block == b & + partable$op == "~1" & partable$user == 1 ], - partable$lhs[ partable$group == g & - partable$op == "~~" & + partable$lhs[ partable$block == b & + partable$op == "~~" & partable$user == 1 ], - partable$rhs[ partable$group == g & - partable$op == "~~" & + partable$rhs[ partable$block == b & + partable$op == "~~" & partable$user == 1 ] ) idx.no.x <- which(ov.x %in% vars) if(length(idx.no.x)) { if(ov.x.fatal) { - stop("lavaan ERROR: model syntax contains variance/covariance/intercept formulas\n involving (an) exogenous variable(s): [", + stop("lavaan ERROR: model syntax contains variance/covariance/intercept formulas\n involving (an) exogenous variable(s): [", paste(ov.x[idx.no.x], collapse=" "), "];\n Please remove them and try again.") } if(warn) { - warning("lavaan WARNING: model syntax contains variance/covariance/intercept formulas\n involving (an) exogenous variable(s): [", + warning("lavaan WARNING: model syntax contains variance/covariance/intercept formulas\n involving (an) exogenous variable(s): [", paste(ov.x[idx.no.x], collapse=" "), "];\n Please use fixed.x=FALSE or leave them alone") - } + } ov.x <- ov.x[-idx.no.x] } ov.tmp.x <- ov.x # extra if(!is.null(partable$exo)) { - ov.cov <- c(partable$lhs[ partable$group == g & - partable$op == "~~" & + ov.cov <- c(partable$lhs[ partable$block == b & + partable$op == "~~" & partable$exo == 1L], - partable$rhs[ partable$group == g & - partable$op == "~~" & + partable$rhs[ partable$block == b & + partable$op == "~~" & partable$exo == 1L]) - ov.int <- partable$lhs[ partable$group == g & - partable$op == "~1" & + ov.int <- partable$lhs[ partable$block == b & + partable$op == "~1" & partable$exo == 1L ] ov.extra <- unique(c(ov.cov, ov.int)) ov.tmp.x <- c(ov.tmp.x, ov.extra[ !ov.extra %in% ov.tmp.x ]) @@ -328,15 +357,15 @@ ov.names.x <- ov.tmp.x } - + # store ov.x? if("ov.x" %in% type) { - OUT$ov.x[[g]] <- ov.names.x + OUT$ov.x[[b]] <- ov.names.x } # story ov.orphan? 
if("ov.orphan" %in% type) { - OUT$ov.orphan[[g]] <- ov.extra + OUT$ov.orphan[[b]] <- ov.extra } # ov's withouth ov.x @@ -347,31 +376,31 @@ # store ov.nox if("ov.nox" %in% type) { - OUT$ov.nox[[g]] <- ov.names.nox + OUT$ov.nox[[b]] <- ov.names.nox } # store ov.model if("ov.model" %in% type) { # if no conditional.x, this is just ov # else, this is ov.nox - if(any( partable$group == g & partable$op == "~" & + if(any( partable$block == b & partable$op == "~" & partable$exo == 1L )) { - OUT$ov.model[[g]] <- ov.names.nox + OUT$ov.model[[b]] <- ov.names.nox } else { - OUT$ov.model[[g]] <- ov.names + OUT$ov.model[[b]] <- ov.names } } # ov's strictly ordered - if(any(type %in% c("ov.ord", "th", "th.mean", + if(any(type %in% c("ov.ord", "th", "th.mean", "ov.num", "lv.nonnormal"))) { - tmp <- unique(partable$lhs[ partable$group == g & + tmp <- unique(partable$lhs[ partable$block == b & partable$op == "|" ]) ord.names <- ov.names[ ov.names %in% tmp ] } if("ov.ord" %in% type) { - OUT$ov.ord[[g]] <- ord.names + OUT$ov.ord[[b]] <- ord.names } # ov's strictly numeric (but no x) @@ -380,18 +409,18 @@ } if("ov.num" %in% type) { - OUT$ov.num[[g]] <- ov.num + OUT$ov.num[[b]] <- ov.num } # nonnormal lv's if("lv.nonnormal" %in% type) { # regular lv's - lv.reg <- unique( partable$lhs[ partable$group == g & + lv.reg <- unique( partable$lhs[ partable$block == b & partable$op == "=~" ] ) if(length(lv.reg) > 0L) { out <- unlist( lapply(lv.reg, function(x) { # get indicators for this lv - tmp.ind <- unique( partable$rhs[ partable$group == g & + tmp.ind <- unique( partable$rhs[ partable$block == b & partable$op == "=~" & partable$lhs == x ] ) if(!all(tmp.ind %in% ov.num)) { @@ -400,16 +429,16 @@ return(character(0)) } }) ) - OUT$lv.nonnormal[[g]] <- out + OUT$lv.nonnormal[[b]] <- out } else { - OUT$lv.nonnormal[[g]] <- character(0) + OUT$lv.nonnormal[[b]] <- character(0) } } if(any(c("th","th.mean") %in% type)) { - TH.lhs <- partable$lhs[ partable$group == g & + TH.lhs <- partable$lhs[ partable$block == b & partable$op == "|" ] - TH.rhs <- partable$rhs[ partable$group == g & + TH.rhs <- partable$rhs[ partable$block == b & partable$op == "|" ] } @@ -419,7 +448,7 @@ # return in the right order (following ord.names!) out <- unlist(lapply(ord.names, function(x) { idx <- which(x == TH.lhs) - TH <- unique(paste(TH.lhs[idx], "|", + TH <- unique(paste(TH.lhs[idx], "|", TH.rhs[idx], sep="")) # make sure the th's are in increasing order sort(TH) @@ -427,7 +456,7 @@ } else { out <- character(0L) } - OUT$th[[g]] <- out + OUT$th[[b]] <- out } # thresholds and mean/intercepts of numeric variables @@ -448,7 +477,7 @@ } else { out <- character(0L) } - OUT$th.mean[[g]] <- out + OUT$th.mean[[b]] <- out } @@ -459,25 +488,25 @@ } if("lv.x" %in% type) { - OUT$lv.x[[g]] <- lv.names.x + OUT$lv.x[[b]] <- lv.names.x } - + # dependent ov (but not also indicator or x) if("ov.y" %in% type) { tmp <- eqs.y[ !eqs.y %in% c(v.ind, eqs.x, lv.names) ] - OUT$ov.y[[g]] <- ov.names[ ov.names %in% tmp ] + OUT$ov.y[[b]] <- ov.names[ ov.names %in% tmp ] } # dependent lv (but not also indicator or x) if("lv.y" %in% type) { tmp <- eqs.y[ !eqs.y %in% c(v.ind, eqs.x) & eqs.y %in% lv.names ] - OUT$lv.y[[g]] <- lv.names[ lv.names %in% tmp ] + OUT$lv.y[[b]] <- lv.names[ lv.names %in% tmp ] } # non-exogenous latent variables if("lv.nox" %in% type) { - OUT$lv.nox[[g]] <- lv.names[! lv.names %in% lv.names.x ] + OUT$lv.nox[[b]] <- lv.names[! 
lv.names %in% lv.names.x ] } } @@ -485,15 +514,13 @@ # to mimic old behaviour, if length(type) == 1L if(length(type) == 1L) { OUT <- OUT[[type]] - # to mimic old behaviour, if specific group is requested - if(is.null(group.orig)) { + # to mimic old behaviour, if specific block is requested + if(ndotdotdot == 0L) { OUT <- unique(unlist(OUT)) - } else if(is.numeric(group.orig) && length(group.orig) == 1L) { - if(length(group.orig) == 1L) { - OUT <- OUT[[group.orig]] - } else { - OUT <- OUT[group.orig] - } + } else if(length(block.select) == 1L) { + OUT <- OUT[[block.select]] + } else { + OUT <- OUT[block.select] } } else { OUT <- OUT[type] diff -Nru r-cran-lavaan-0.5.22/R/lav_polychor.R r-cran-lavaan-0.5.23.1097/R/lav_polychor.R --- r-cran-lavaan-0.5.22/R/lav_polychor.R 2016-03-29 18:31:17.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_polychor.R 2017-02-07 16:32:14.000000000 +0000 @@ -259,7 +259,8 @@ # zero.keep.margins is only used for 2x2 tables pc_cor_TS <- function(Y1, Y2, eXo=NULL, fit.y1=NULL, fit.y2=NULL, freq=NULL, method="nlminb", zero.add = c(0.5, 0.0), control=list(), - zero.keep.margins = TRUE, zero.cell.warn = TRUE, + zero.keep.margins = TRUE, zero.cell.warn = FALSE, + zero.cell.flag = FALSE, verbose=FALSE, Y1.name=NULL, Y2.name=NULL) { # cat("DEBUG: method = ", method, "\n") @@ -273,6 +274,9 @@ stopifnot(min(Y1, na.rm=TRUE) == 1L, min(Y2, na.rm=TRUE) == 1L, method %in% c("nlminb", "BFGS", "nlminb.hessian", "optimize")) + # empty cells or not + empty.cells <- FALSE + # exo or not? exo <- ifelse(length(fit.y1$slope.idx) > 0L, TRUE, FALSE) @@ -285,13 +289,16 @@ if(is.null(freq)) freq <- pc_freq(fit.y1$y,fit.y2$y) nr <- nrow(freq); nc <- ncol(freq) - # check for empty cells -- FIXME: make this an option! - if(zero.cell.warn && any(freq == 0L)) { - if(!is.null(Y1.name) && !is.null(Y2.name)) { + # check for empty cells + if(any(freq == 0L)) { + empty.cells <- TRUE + if(zero.cell.warn) { + if(!is.null(Y1.name) && !is.null(Y2.name)) { warning("lavaan WARNING: empty cell(s) in bivariate table of ", - Y1.name, " x ", Y2.name) - } else { - warning("lavaan WARNING: empty cell(s) in bivariate table") + Y1.name, " x ", Y2.name) + } else { + warning("lavaan WARNING: empty cell(s) in bivariate table") + } } } @@ -302,9 +309,17 @@ if(length(idx) == 2L) { warning("lavaan WARNING: two empty cells in 2x2 table") if(freq[1,1] > 0L) { - return(1.0) + rho <- 1.0 + if(zero.cell.flag) { + attr(rho, "zero.cell.flag") <- empty.cells + } + return(rho) } else { - return(-1.0) + rho <- -1.0 + if(zero.cell.flag) { + attr(rho, "zero.cell.flag") <- empty.cells + } + return(rho) } } else if(length(idx) == 1L && zero.add[1] > 0.0) { if(zero.keep.margins) { @@ -337,12 +352,20 @@ # 1. a*d == c*d storage.mode(freq) <- "numeric" # to avoid integer overflow if(freq[1,1]*freq[2,2] == freq[1,2]*freq[2,1]) { - return(0.0) + rho <- 0.0 + if(zero.cell.flag) { + attr(rho, "zero.cell.flag") <- empty.cells + } + return(rho) } # 2. 
equal margins (th1 = th2 = 0) if(th.y1[1] == 0L && th.y2[1] == 0L) { # see eg Brown & Benedetti 1977 eq 2 - return( - cos( 2*pi*freq[1,1]/sum(freq) ) ) + rho <- - cos( 2*pi*freq[1,1]/sum(freq) ) + if(zero.cell.flag) { + attr(rho, "zero.cell.flag") <- empty.cells + } + return(rho) } } } @@ -471,6 +494,10 @@ rho <- tanh(out$par) } + if(zero.cell.flag) { + attr(rho, "zero.cell.flag") <- empty.cells + } + rho } diff -Nru r-cran-lavaan-0.5.22/R/lav_predict.R r-cran-lavaan-0.5.23.1097/R/lav_predict.R --- r-cran-lavaan-0.5.22/R/lav_predict.R 2016-06-14 14:51:14.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_predict.R 2017-02-21 19:08:11.000000000 +0000 @@ -27,6 +27,7 @@ lavmodel <- object@Model lavdata <- object@Data lavsamplestats <- object@SampleStats + lavimplied <- object@implied lavpta <- object@pta # type @@ -51,13 +52,13 @@ OV <- lavdata@ov newData <- lavData(data = newdata, group = lavdata@group, - group.label = lavdata@group.label, ov.names = lavdata@ov.names, - ordered = OV$name[ OV$type == "ordered" ], ov.names.x = lavdata@ov.names.x, - std.ov = lavdata@std.ov, - missing = lavdata@missing, - # warn = FALSE, + ordered = OV$name[ OV$type == "ordered" ], + lavoptions = list(std.ov = lavdata@std.ov, + group.label = lavdata@group.label, + missing = lavdata@missing, + warn = FALSE), allow.single.case = TRUE) data.obs <- newData@X eXo <- newData@eXo @@ -73,6 +74,7 @@ out <- lav_predict_eta(lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, + lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, method = method, fsm = fsm, optim.method = optim.method) @@ -102,6 +104,7 @@ } else if(type == "yhat") { out <- lav_predict_yhat(lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, + lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, ETA = NULL, method = method, optim.method = optim.method) @@ -151,6 +154,7 @@ # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, + lavimplied = NULL, # new data data.obs = NULL, eXo = NULL, # options @@ -177,12 +181,14 @@ if(all(lavdata@ov$type == "numeric")) { if(method == "ebm") { out <- lav_predict_eta_normal(lavobject = lavobject, - lavmodel = lavmodel, lavdata = lavdata, + lavmodel = lavmodel, lavdata = lavdata, + lavimplied = lavimplied, lavsamplestats = lavsamplestats, data.obs = data.obs, eXo = eXo, fsm = fsm) } else if(method == "bartlett" || method == "bartlet") { out <- lav_predict_eta_bartlett(lavobject = lavobject, lavmodel = lavmodel, lavdata = lavdata, + lavimplied = lavimplied, lavsamplestats = lavsamplestats, data.obs = data.obs, eXo = eXo, fsm = fsm) } else { @@ -207,6 +213,7 @@ # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, + lavimplied = NULL, # optional new data data.obs = NULL, eXo = NULL, fsm = FALSE) { @@ -216,9 +223,10 @@ lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats + lavimplied <- lavobject@implied } else { stopifnot(!is.null(lavmodel), !is.null(lavdata), - !is.null(lavsamplestats)) + !is.null(lavsamplestats), !is.null(lavimplied)) } if(is.null(data.obs)) { @@ -226,12 +234,25 @@ } # eXo not needed - Sigma.hat <- computeSigmaHat(lavmodel = lavmodel) + # missings? and missing = "ml"? 
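# NOTE: under missing = "ml", missing entries are first replaced by
# their conditional means under the model-implied normal; for one case,
# with o/m indexing the observed and missing parts, the underlying
# formula is:
#   E(y_m | y_o) = Mu[m] + Sigma[m, o] %*% solve(Sigma[o, o]) %*%
#                  (y[o] - Mu[o])
# (the basis of the lav_mvnorm_missing_impute_pattern call below).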
+ # impute values under the normal + if(lavdata@missing == "ml") { + for(g in seq_len(lavdata@ngroups)) { + data.obs[[g]] <- + lav_mvnorm_missing_impute_pattern(Y = lavdata@X[[g]], + Mp = lavdata@Mp[[g]], + Mu = lavimplied$mean[[g]], + Sigma = lavimplied$cov[[g]]) + } + } + + + LAMBDA <- computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE) + Sigma.hat <- lavimplied$cov Sigma.hat.inv <- lapply(Sigma.hat, solve) VETA <- computeVETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats) EETA <- computeEETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats) EY <- computeEY( lavmodel = lavmodel, lavsamplestats = lavsamplestats) - LAMBDA <- computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE) FS <- vector("list", length = lavdata@ngroups) if(fsm) { @@ -281,12 +302,24 @@ } # factor scores - normal case - Bartlett method -# NOTE: this is the classic 'Bartlett' method; for the linear/continuous -# case, this is equivalent to 'ML' +# NOTES: 1) this is the classic 'Bartlett' method; for the linear/continuous +# case, this is equivalent to 'ML' +# 2) the usual formula is: +# FSC = solve(lambda' theta.inv lambda) (lambda' theta.inv) +# BUT to deal with zero or negative variances, we use the +# 'GLS' version instead: +# FSC = solve(lambda' sigma.inv lambda) (lambda' sigma.inv) +# Reference: Bentler & Yuan (1997) 'Optimal Conditionally Unbiased +# Equivariant Factor Score Estimators' +# in Berkane (Ed) 'Latent variable modeling with +# applications to causality' (Springer-Verlag) +# 3) instead of solve(), we use MASS::ginv, for special settings where +# -by construction- (lambda' sigma.inv lambda) is singular lav_predict_eta_bartlett <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, + lavimplied = NULL, # optional new data data.obs = NULL, eXo = NULL, fsm = FALSE) { @@ -296,9 +329,10 @@ lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats + lavimplied <- lavobject@implied } else { stopifnot(!is.null(lavmodel), !is.null(lavdata), - !is.null(lavsamplestats)) + !is.null(lavsamplestats), !is.null(lavimplied)) } if(is.null(data.obs)) { @@ -306,10 +340,20 @@ } # eXo not needed - LAMBDA <- computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE) - THETA <- computeTHETA(lavmodel = lavmodel) - THETA.inv <- lapply(THETA, solve) + # missings? and missing = "ml"? 
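# NOTE: same imputation step as in lav_predict_eta_normal above; the
# GLS-form Bartlett coefficients are then built as in NOTES 2)-3). A
# hedged sketch for one group, with assumed lambda/sigma.inv/EY/EETA:
#   FSC   <- MASS::ginv(t(lambda) %*% sigma.inv %*% lambda) %*%
#            t(lambda) %*% sigma.inv
#   eta.i <- EETA + FSC %*% (y.i - EY) # FSC %*% lambda = I, so the
#                                      # estimator is conditionally
#                                      # unbiased: E(eta.hat | eta) = eta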
+ # impute values under the normal + if(lavdata@missing == "ml") { + for(g in seq_len(lavdata@ngroups)) { + data.obs[[g]] <- + lav_mvnorm_missing_impute_pattern(Y = lavdata@X[[g]], + Mp = lavdata@Mp[[g]], + Mu = lavimplied$mean[[g]], + Sigma = lavimplied$cov[[g]]) + } + } + LAMBDA <- computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE) + Sigma.hat.inv <- lapply(lavimplied$cov, solve) EETA <- computeEETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats) EY <- computeEY( lavmodel = lavmodel, lavsamplestats = lavsamplestats) @@ -325,8 +369,8 @@ } # factor score coefficient matrix 'C' - FSC = ( solve(t(LAMBDA[[g]]) %*% THETA.inv[[g]] %*% LAMBDA[[g]]) %*% - t(LAMBDA[[g]]) %*% THETA.inv[[g]] ) + FSC = (MASS::ginv(t(LAMBDA[[g]]) %*% Sigma.hat.inv[[g]] %*% LAMBDA[[g]]) + %*% t(LAMBDA[[g]]) %*% Sigma.hat.inv[[g]] ) if(fsm) { FSM[[g]] <- FSC @@ -504,7 +548,7 @@ if(out$convergence == 0L) { eta.i <- out$par } else { - eta.i <- rep(as.numeric(NA), nfac2) + eta.i <- rep(as.numeric(NA), nfac) } # add dummy ov.y lv values @@ -535,6 +579,7 @@ # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, + lavimplied = NULL, # new data data.obs = NULL, eXo = NULL, # ETA values @@ -549,9 +594,10 @@ lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats + lavimplied <- lavobject@implied } else { stopifnot(!is.null(lavmodel), !is.null(lavdata), - !is.null(lavsamplestats)) + !is.null(lavsamplestats), !is.null(lavimplied)) } # new data? @@ -566,6 +612,7 @@ if(is.null(ETA)) { ETA <- lav_predict_eta(lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, + lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, method = method, optim.method = optim.method) } else { diff -Nru r-cran-lavaan-0.5.22/R/lav_print.R r-cran-lavaan-0.5.23.1097/R/lav_print.R --- r-cran-lavaan-0.5.22/R/lav_print.R 2016-07-20 09:08:17.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_print.R 2017-02-24 12:42:27.000000000 +0000 @@ -128,14 +128,44 @@ t1.txt <- sprintf(" %10i", attr(x, "bootstrap.successful")) cat(t0.txt, t1.txt, "\n", sep="") } + + # 4. 
+ if(attr(x, "missing") %in% c("two.stage", "robust.two.stage")) { + t0.txt <- sprintf(" %-35s", "Information saturated (h1) model") + tmp.txt <- attr(x, "h1.information") + t1.txt <- sprintf(" %15s", paste(toupper(substring(tmp.txt,1,1)), + substring(tmp.txt,2), sep="")) + cat(t0.txt, t1.txt, "\n", sep="") + if(attr(x, "information") == "observed") { + t0.txt <- sprintf(" %-35s", "Observed information based on") + tmp.txt <- attr(x, "observed.information") + t1.txt <- sprintf(" %15s", + paste(toupper(substring(tmp.txt,1,1)), + substring(tmp.txt,2), sep="")) + cat(t0.txt, t1.txt, "\n", sep="") + } + } } - + # number of groups if(is.null(x$group)) { ngroups <- 1L x$group <- rep(1L, length(x$lhs)) } else { - ngroups <- max(x$group) + ngroups <- lav_partable_ngroups(x) + } + + # number of levels + if(is.null(x$level)) { + nlevels <- 1L + x$level <- rep(1L, length(x$lhs)) + } else { + nlevels <- lav_partable_nlevels(x) + } + + # block column + if(is.null(x$block)) { + x$block <- rep(1L, length(x$lhs)) } # round to 3 digits after the decimal point @@ -149,8 +179,9 @@ }), stringsAsFactors = FALSE) - # always remove group/op/rhs/label/exo columns + # always remove /block/level/group/op/rhs/label/exo columns y$op <- y$group <- y$rhs <- y$label <- y$exo <- NULL + y$block <- y$level <- NULL # if standardized, remove std.nox column (space reasons only) y$std.nox <- NULL @@ -266,166 +297,177 @@ } } - # first the group-specific sections + b <- 0L + # group-specific sections for(g in 1:ngroups) { + # block number + b <- b + 1L + # ov/lv names - ov.names <- lavNames(x, "ov", group = g) - lv.names <- lavNames(x, "lv", group = g) + ov.names <- lavNames(x, "ov", block = b) + lv.names <- lavNames(x, "lv", block = b) # group header if(ngroups > 1L) { group.label <- attr(x, "group.label") - #if(g > 1L) { - cat("\n\n") - #} else { - # cat("\n") - #} + cat("\n\n") cat("Group ", g, " [", group.label[g], "]:\n", sep="") } - # group-specific sections - for(s in GSECTIONS) { - if(s == "Latent Variables") { - row.idx <- which( x$op == "=~" & !x$lhs %in% ov.names & - x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else if(s == "Composites") { - row.idx <- which( x$op == "<~" & x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else if(s == "Regressions") { - row.idx <- which( x$op == "~" & x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else if(s == "Covariances") { - row.idx <- which(x$op == "~~" & x$lhs != x$rhs & !x$exo & - x$group == g) - if(length(row.idx) == 0L) next - # make distinction between residual and plain - y.names <- unique( c(lavNames(x, "eqs.y"), - lavNames(x, "ov.ind")) ) - PREFIX <- rep("", length(row.idx)) - PREFIX[ x$rhs[row.idx] %in% y.names ] <- " ." - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx], - PREFIX = PREFIX) - #m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else if(s == "Intercepts") { - row.idx <- which(x$op == "~1" & !x$exo & x$group == g) - if(length(row.idx) == 0L) next - # make distinction between intercepts and means - y.names <- unique( c(lavNames(x, "eqs.y"), - lavNames(x, "ov.ind")) ) - PREFIX <- rep("", length(row.idx)) - PREFIX[ x$lhs[row.idx] %in% y.names ] <- " ." 
- m[row.idx,1] <- .makeNames(x$lhs[row.idx], x$label[row.idx], - PREFIX = PREFIX) - #m[row.idx,1] <- .makeNames(x$lhs[row.idx], x$label[row.idx]) - } else if(s == "Thresholds") { - row.idx <- which(x$op == "|" & x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(paste(x$lhs[row.idx], "|", - x$rhs[row.idx], sep=""), x$label[row.idx]) - } else if(s == "Variances") { - row.idx <- which(x$op == "~~" & x$lhs == x$rhs & !x$exo & - x$group == g) - if(length(row.idx) == 0L) next - # make distinction between residual and plain - y.names <- unique( c(lavNames(x, "eqs.y"), - lavNames(x, "ov.ind")) ) - PREFIX <- rep("", length(row.idx)) - PREFIX[ x$rhs[row.idx] %in% y.names ] <- " ." - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx], - PREFIX = PREFIX) - } else if(s == "Scales y*") { - row.idx <- which(x$op == "~*~" & x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else if(s == "Group Weight") { - row.idx <- which(x$lhs == "group" & x$op == "%" & x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else if(s == "R-Square") { - row.idx <- which(x$op == "r2" & x$group == g) - if(length(row.idx) == 0L) next - m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) - } else { - row.idx <- integer(0L) - } - - # do we need special formatting for this section? - # three types: - # - regular (nothing to do, except row/colnames) - # - R-square - # - Latent Variables (and Composites), Regressions and Covariances - # 'bundle' the output per lhs element - - # bundling - if(s %in% c("Latent Variables", "Composites", - "Regressions", "Covariances")) { - nel <- length(row.idx) - M <- matrix("", nrow = nel*2, ncol = ncol(m)) - colnames(M) <- colnames(m) - rownames(M) <- rep("", NROW(M)) - #colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) - LHS <- paste(x$lhs[row.idx], x$op[row.idx]) - lhs.idx <- seq(1, nel*2L, 2L) - rhs.idx <- seq(1, nel*2L, 2L) + 1L - if(s == "Covariances") { + for(l in 1:nlevels) { + + # level header + if(nlevels > 1L) { + level.label <- attr(x, "level.label") + cat("\n\n") + cat("Level ", l, " [", level.label[l], "]:\n", sep="") + } + + # group-specific sections + for(s in GSECTIONS) { + if(s == "Latent Variables") { + row.idx <- which( x$op == "=~" & !x$lhs %in% ov.names & + x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) + } else if(s == "Composites") { + row.idx <- which( x$op == "<~" & x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) + } else if(s == "Regressions") { + row.idx <- which( x$op == "~" & x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) + } else if(s == "Covariances") { + row.idx <- which(x$op == "~~" & x$lhs != x$rhs & !x$exo & + x$block == b) + if(length(row.idx) == 0L) next + # make distinction between residual and plain + y.names <- unique( c(lavNames(x, "eqs.y"), + lavNames(x, "ov.ind")) ) + PREFIX <- rep("", length(row.idx)) + PREFIX[ x$rhs[row.idx] %in% y.names ] <- " ." 
+ m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx], + PREFIX = PREFIX) + #m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) + } else if(s == "Intercepts") { + row.idx <- which(x$op == "~1" & !x$exo & x$block == b) + if(length(row.idx) == 0L) next + # make distinction between intercepts and means + y.names <- unique( c(lavNames(x, "eqs.y"), + lavNames(x, "ov.ind")) ) + PREFIX <- rep("", length(row.idx)) + PREFIX[ x$lhs[row.idx] %in% y.names ] <- " ." + m[row.idx,1] <- .makeNames(x$lhs[row.idx], x$label[row.idx], + PREFIX = PREFIX) + #m[row.idx,1] <- .makeNames(x$lhs[row.idx], x$label[row.idx]) + } else if(s == "Thresholds") { + row.idx <- which(x$op == "|" & x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(paste(x$lhs[row.idx], "|", + x$rhs[row.idx], sep=""), x$label[row.idx]) + } else if(s == "Variances") { + row.idx <- which(x$op == "~~" & x$lhs == x$rhs & !x$exo & + x$block == b) + if(length(row.idx) == 0L) next # make distinction between residual and plain y.names <- unique( c(lavNames(x, "eqs.y"), lavNames(x, "ov.ind")) ) PREFIX <- rep("", length(row.idx)) - PREFIX[ x$lhs[row.idx] %in% y.names ] <- "." + PREFIX[ x$rhs[row.idx] %in% y.names ] <- " ." + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx], + PREFIX = PREFIX) + } else if(s == "Scales y*") { + row.idx <- which(x$op == "~*~" & x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) + } else if(s == "Group Weight") { + row.idx <- which(x$lhs == "group" & x$op == "%" & x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) + } else if(s == "R-Square") { + row.idx <- which(x$op == "r2" & x$block == b) + if(length(row.idx) == 0L) next + m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else { - PREFIX <- rep("", length(LHS)) + row.idx <- integer(0L) } - M[lhs.idx, 1] <- sprintf("%1s%-15s", PREFIX, LHS) - M[rhs.idx, ] <- m[row.idx,] - # avoid duplicated LHS labels - if(nel > 1L) { - del.idx <- integer(0) - old.lhs <- "" - for(i in 1:nel) { - if(LHS[i] == old.lhs) { - del.idx <- c(del.idx, lhs.idx[i]) - } - old.lhs <- LHS[i] + + # do we need special formatting for this section? + # three types: + # - regular (nothing to do, except row/colnames) + # - R-square + # - Latent Variables (and Composites), Regressions and Covariances + # 'bundle' the output per lhs element + + # bundling + if(s %in% c("Latent Variables", "Composites", + "Regressions", "Covariances")) { + nel <- length(row.idx) + M <- matrix("", nrow = nel*2, ncol = ncol(m)) + colnames(M) <- colnames(m) + rownames(M) <- rep("", NROW(M)) + #colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) + LHS <- paste(x$lhs[row.idx], x$op[row.idx]) + lhs.idx <- seq(1, nel*2L, 2L) + rhs.idx <- seq(1, nel*2L, 2L) + 1L + if(s == "Covariances") { + # make distinction between residual and plain + y.names <- unique( c(lavNames(x, "eqs.y"), + lavNames(x, "ov.ind")) ) + PREFIX <- rep("", length(row.idx)) + PREFIX[ x$lhs[row.idx] %in% y.names ] <- "." 
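# --- editor's sketch (not part of the upstream patch) -----------------------
# "Bundling" prints one "lhs op" header row above its rhs rows, then drops
# repeated headers so each left-hand side appears only once.  A simplified,
# self-contained analogue of the de-duplication step:
LHS <- c("f =~", "f =~", "g =~")
LHS[duplicated(LHS)] <- ""            # keep the first occurrence only
LHS                                   # "f =~"  ""  "g =~"
# ---------------------------------------------------------------------------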
+ } else { + PREFIX <- rep("", length(LHS)) } - if(length(del.idx) > 0L) { - M <- M[-del.idx,,drop=FALSE] + M[lhs.idx, 1] <- sprintf("%1s%-15s", PREFIX, LHS) + M[rhs.idx, ] <- m[row.idx,] + # avoid duplicated LHS labels + if(nel > 1L) { + del.idx <- integer(0) + old.lhs <- "" + for(i in 1:nel) { + if(LHS[i] == old.lhs) { + del.idx <- c(del.idx, lhs.idx[i]) + } + old.lhs <- LHS[i] + } + if(length(del.idx) > 0L) { + M <- M[-del.idx,,drop=FALSE] + } } + cat("\n", s, ":\n", sep = "") + #cat("\n") + print(M, quote = FALSE) + + # R-square + } else if(s == "R-Square") { + M <- m[row.idx,1:2,drop=FALSE] + colnames(M) <- colnames(m)[1:2] + rownames(M) <- rep("", NROW(M)) + #colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) + cat("\n", s, ":\n", sep = "") + #cat("\n") + print(M, quote = FALSE) + + # Regular + } else { + #M <- rbind(matrix("", nrow = 1L, ncol = ncol(m)), + # m[row.idx,]) + M <- m[row.idx,,drop=FALSE] + colnames(M) <- colnames(m) + rownames(M) <- rep("", NROW(M)) + #colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) + cat("\n", s, ":\n", sep = "") + #cat("\n") + print(M, quote = FALSE) } - cat("\n", s, ":\n", sep = "") - #cat("\n") - print(M, quote = FALSE) - - # R-square - } else if(s == "R-Square") { - M <- m[row.idx,1:2,drop=FALSE] - colnames(M) <- colnames(m)[1:2] - rownames(M) <- rep("", NROW(M)) - #colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) - cat("\n", s, ":\n", sep = "") - #cat("\n") - print(M, quote = FALSE) - - # Regular - } else { - #M <- rbind(matrix("", nrow = 1L, ncol = ncol(m)), - # m[row.idx,]) - M <- m[row.idx,,drop=FALSE] - colnames(M) <- colnames(m) - rownames(M) <- rep("", NROW(M)) - #colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) - cat("\n", s, ":\n", sep = "") - #cat("\n") - print(M, quote = FALSE) } - } + + } # groups - } + } # levels # asections for(s in ASECTIONS) { @@ -529,3 +571,34 @@ NAMES } + +summary.lavaan.fsr <- function(object, ...) { + + dotdotdot <- list(...) 
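# --- editor's note (not part of the upstream patch) --------------------------
# `nd` is the number of decimals used when printing; if the caller did not
# supply one via ..., the summary method falls back to 3 and forwards it to
# print.lavaan.fsr() below.
# ---------------------------------------------------------------------------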
+ if(!is.null(dotdotdot$nd)) { + nd <- dotdotdot$nd + } else { + nd <- 3L + } + + print.lavaan.fsr(x = object, nd = nd) +} + +print.lavaan.fsr <- function(x, ..., nd = 3L) { + + y <- unclass(x) + + # print header + if(!is.null(y$header)) { + cat(y$header) + cat("\n") + } + + # print PE + print.lavaan.parameterEstimates(y$PE, ..., nd = nd) + + invisible(y) +} + + + diff -Nru r-cran-lavaan-0.5.22/R/lav_representation_lisrel.R r-cran-lavaan-0.5.23.1097/R/lav_representation_lisrel.R --- r-cran-lavaan-0.5.22/R/lav_representation_lisrel.R 2016-08-29 13:57:14.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_representation_lisrel.R 2017-02-21 09:05:08.000000000 +0000 @@ -13,6 +13,8 @@ # prepare target list if(is.null(target)) target <- partable + stopifnot(!is.null(target$block)) + # prepare output N <- length(target$lhs) tmp.mat <- character(N); tmp.row <- integer(N); tmp.col <- integer(N) @@ -31,44 +33,33 @@ gamma <- FALSE } - # number of groups - if(is.null(partable$group)) { - partable$group <- rep(1L, length(partable$lhs)) - ngroups <- 1L - } else { - if(is.character(partable$group)) { - group.label <- unique(partable$group) - group.label <- group.label[ nchar(group.label) > 0L ] - ngroups <- length(group.label) - } else { - ngroups <- max(partable$group) - } - } + # number of blocks + nblocks <- lav_partable_nblocks(partable) - ov.dummy.names.nox <- vector("list", ngroups) - ov.dummy.names.x <- vector("list", ngroups) + ov.dummy.names.nox <- vector("list", nblocks) + ov.dummy.names.x <- vector("list", nblocks) if(extra) { - REP.mmNames <- vector("list", ngroups) - REP.mmNumber <- vector("list", ngroups) - REP.mmRows <- vector("list", ngroups) - REP.mmCols <- vector("list", ngroups) - REP.mmDimNames <- vector("list", ngroups) - REP.mmSymmetric <- vector("list", ngroups) + REP.mmNames <- vector("list", nblocks) + REP.mmNumber <- vector("list", nblocks) + REP.mmRows <- vector("list", nblocks) + REP.mmCols <- vector("list", nblocks) + REP.mmDimNames <- vector("list", nblocks) + REP.mmSymmetric <- vector("list", nblocks) } - for(g in 1:ngroups) { + for(g in 1:nblocks) { - # info from user model per group + # info from user model per block if(gamma) { - ov.names <- vnames(partable, "ov.nox", group=g) + ov.names <- vnames(partable, "ov.nox", block=g) } else { - ov.names <- vnames(partable, "ov", group=g) + ov.names <- vnames(partable, "ov", block=g) } nvar <- length(ov.names) - lv.names <- vnames(partable, "lv", group=g); nfac <- length(lv.names) - ov.th <- vnames(partable, "th", group=g); nth <- length(ov.th) - ov.names.x <- vnames(partable, "ov.x",group=g); nexo <- length(ov.names.x) - ov.names.nox <- vnames(partable, "ov.nox",group=g) + lv.names <- vnames(partable, "lv", block=g); nfac <- length(lv.names) + ov.th <- vnames(partable, "th", block=g); nth <- length(ov.th) + ov.names.x <- vnames(partable, "ov.x",block=g); nexo <- length(ov.names.x) + ov.names.nox <- vnames(partable, "ov.nox",block=g) # in this representation, we need to create 'phantom/dummy' latent # variables for all `x' and `y' variables not in lv.names @@ -79,23 +70,23 @@ tmp.names <- unique( partable$lhs[(partable$op == "~" | partable$op == "<~") & - partable$group == g] ) + partable$block == g] ) } else { tmp.names <- unique( c(partable$lhs[(partable$op == "~" | partable$op == "<~") & - partable$group == g], + partable$block == g], partable$rhs[(partable$op == "~" | partable$op == "<~") & - partable$group == g]) ) + partable$block == g]) ) } dummy.names1 <- tmp.names[ !tmp.names %in% lv.names ] # covariances involving 
dummys - dummy.cov.idx <- which(partable$op == "~~" & partable$group == g & + dummy.cov.idx <- which(partable$op == "~~" & partable$block == g & (partable$lhs %in% dummy.names1 | partable$rhs %in% dummy.names1)) # new in 0.5-21: also include covariances involving these covariances... - dummy.cov.idx1 <- which(partable$op == "~~" & partable$group == g & + dummy.cov.idx1 <- which(partable$op == "~~" & partable$block == g & (partable$lhs %in% partable$lhs[dummy.cov.idx] | partable$rhs %in% partable$rhs[dummy.cov.idx])) dummy.cov.idx <- unique(c(dummy.cov.idx, dummy.cov.idx1)) @@ -128,21 +119,21 @@ } # 1a. "=~" regular indicators - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "=~" & !(target$rhs %in% lv.names)) tmp.mat[idx] <- "lambda" tmp.row[idx] <- match(target$rhs[idx], ov.names) tmp.col[idx] <- match(target$lhs[idx], lv.names) # 1b. "=~" regular higher-order lv indicators - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "=~" & !(target$rhs %in% ov.names)) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$rhs[idx], lv.names) tmp.col[idx] <- match(target$lhs[idx], lv.names) # 1c. "=~" indicators that are both in ov and lv - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "=~" & target$rhs %in% ov.names & target$rhs %in% lv.names) tmp.mat[idx] <- "beta" @@ -153,7 +144,7 @@ if(gamma) { # gamma idx <- which(target$rhs %in% ov.names.x & - target$group == g & (target$op == "~" | + target$block == g & (target$op == "~" | target$op == "<~") ) tmp.mat[idx] <- "gamma" tmp.row[idx] <- match(target$lhs[idx], lv.names) @@ -161,13 +152,13 @@ # beta idx <- which(!target$rhs %in% ov.names.x & - target$group == g & (target$op == "~" | + target$block == g & (target$op == "~" | target$op == "<~") ) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- match(target$rhs[idx], lv.names) } else { - idx <- which(target$group == g & (target$op == "~" | + idx <- which(target$block == g & (target$op == "~" | target$op == "<~") ) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$lhs[idx], lv.names) @@ -175,28 +166,28 @@ } # 3a. "~~" ov - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "~~" & !(target$lhs %in% lv.names)) tmp.mat[idx] <- "theta" tmp.row[idx] <- match(target$lhs[idx], ov.names) tmp.col[idx] <- match(target$rhs[idx], ov.names) # 3b. "~~" lv - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "~~" & target$rhs %in% lv.names) tmp.mat[idx] <- "psi" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- match(target$rhs[idx], lv.names) # 4a. "~1" ov - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "~1" & !(target$lhs %in% lv.names)) tmp.mat[idx] <- "nu" tmp.row[idx] <- match(target$lhs[idx], ov.names) tmp.col[idx] <- 1L # 4b. "~1" lv - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "~1" & target$lhs %in% lv.names) tmp.mat[idx] <- "alpha" tmp.row[idx] <- match(target$lhs[idx], lv.names) @@ -204,7 +195,7 @@ # 5. "|" th LABEL <- paste(target$lhs, target$op, target$rhs, sep="") - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "|" & LABEL %in% ov.th) TH <- paste(target$lhs[idx], "|", target$rhs[idx], sep="") tmp.mat[idx] <- "tau" @@ -212,7 +203,7 @@ tmp.col[idx] <- 1L # 6. 
"~*~" scales - idx <- which(target$group == g & + idx <- which(target$block == g & target$op == "~*~") tmp.mat[idx] <- "delta" tmp.row[idx] <- match(target$lhs[idx], ov.names) @@ -227,7 +218,7 @@ } # new 0.5-16: group weights - idx <- which(target$group == g & target$lhs == "group" & + idx <- which(target$block == g & target$lhs == "group" & target$op == "%") tmp.mat[idx] <- "gw" tmp.row[idx] <- 1L @@ -298,7 +289,7 @@ REP.mmDimNames[[g]] <- mmDimNames[ mmNames ] REP.mmSymmetric[[g]] <- unlist(mmSymmetric[ mmNames ]) } # extra - } # ngroups + } # nblocks REP <- list(mat = tmp.mat, row = tmp.row, @@ -923,7 +914,7 @@ # 3) PI # 4) SigmaHat == VYx -# compute MuHat for a single group -- only for the continuous case (no eXo) +# compute MuHat for a single block/group; only for the continuous case (no eXo) # # this is a special case of E(Y) where # - we have no (explicit) eXogenous variables @@ -952,7 +943,7 @@ Mu.hat } -# compute TH for a single group +# compute TH for a single block/group computeTH.LISREL <- function(MLIST=NULL, th.idx=NULL) { LAMBDA <- MLIST$lambda; nvar <- nrow(LAMBDA); nfac <- ncol(LAMBDA) @@ -1014,7 +1005,7 @@ as.vector(TH) } -# compute PI for a single group +# compute PI for a single block/group computePI.LISREL <- function(MLIST=NULL) { LAMBDA <- MLIST$lambda diff -Nru r-cran-lavaan-0.5.22/R/lav_representation.R r-cran-lavaan-0.5.23.1097/R/lav_representation.R --- r-cran-lavaan-0.5.22/R/lav_representation.R 1970-01-01 00:00:00.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_representation.R 2017-02-20 08:50:19.000000000 +0000 @@ -0,0 +1,39 @@ +# user visible function to add 'matrix' entries in the parameter table +lavMatrixRepresentation <- function(partable, representation = "LISREL", + add.attributes = FALSE, + as.data.frame. = TRUE) { + + # check parameter table + partable <- lav_partable_complete(partable) + + # get model matrices + if(representation == "LISREL") { + REP <- representation.LISREL(partable, target = NULL, + extra = add.attributes) + } else { + stop("lavaan ERROR: only representation \"LISREL\" has been implemented.") + } + + partable$mat <- REP$mat + partable$row <- REP$row + partable$col <- REP$col + + if(as.data.frame.) { + partable <- as.data.frame(partable, stringsAsFactors=FALSE) + class(partable) <- c("lavaan.data.frame", "data.frame") + } + + if(add.attributes) { + attr(partable, "ov.dummy.names.nox") <- attr(REP, "ov.dummy.names.nox") + attr(partable, "ov.dummy.names.x") <- attr(REP, "ov.dummy.names.x") + attr(partable, "mmNames") <- attr(REP, "mmNames") + attr(partable, "mmNumber") <- attr(REP, "mmNumber") + attr(partable, "mmRows") <- attr(REP, "mmRows") + attr(partable, "mmCols") <- attr(REP, "mmCols") + attr(partable, "mmDimNames") <- attr(REP, "mmDimNames") + attr(partable, "mmSymmetric") <- attr(REP, "mmSymmetric") + } + + partable +} + diff -Nru r-cran-lavaan-0.5.22/R/lav_residuals.R r-cran-lavaan-0.5.23.1097/R/lav_residuals.R --- r-cran-lavaan-0.5.22/R/lav_residuals.R 2016-09-24 13:11:39.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_residuals.R 2017-02-21 09:50:36.000000000 +0000 @@ -59,7 +59,7 @@ stop("lavaan ERROR: can not compute standardized residuals if there are no free parameters in the model") } - G <- object@Data@ngroups + G <- object@Model@nblocks meanstructure <- object@Model@meanstructure ov.names <- object@Data@ov.names @@ -69,7 +69,7 @@ # fixed.x idx? 
x.idx <- integer(0) if(object@Options$fixed.x) { - x.idx <- match(vnames(object@ParTable, "ov.x", group=1L), + x.idx <- match(vnames(object@ParTable, "ov.x", block=1L), object@Data@ov.names[[1L]]) ### FIXME!!!! will not ### work for different } ### models in groups @@ -86,11 +86,7 @@ augUser$free[ idx ] <- max(augUser$free) + 1:length(idx) #augUser$unco[idx ] <- max(augUser$unco) + 1:length(idx) augModel <- lav_model(lavpartable = augUser, - representation = object@Options$representation, - conditional.x = object@Options$conditional.x, - parameterization = object@Options$parameterization, - link = object@Options$link, - debug = object@Options$debug) + lavoptions = object@Options) VarCov <- lav_model_vcov(lavmodel = augModel, lavsamplestats = object@SampleStats, lavdata = object@Data, @@ -141,7 +137,7 @@ } nvar <- ncol(S) - # residuals (for this group) + # residuals (for this block) if(type == "cor.bollen") { if(object@Model@conditional.x) { R[[g]]$cov <- cov2cor(S) - cov2cor(object@implied$res.cov[[g]]) @@ -196,7 +192,7 @@ R[[g]]$th <- R[[g]]$th[ -NUM.idx ] } if(labels) { - names(R[[g]]$th) <- vnames(object@ParTable, type="th", group=g) + names(R[[g]]$th) <- vnames(object@ParTable, type="th", block=g) } } @@ -336,7 +332,7 @@ if(G == 1) { R <- R[[1]] } else { - names(R) <- unlist(object@Data@group.label) + names(R) <- unlist(object@Data@block.label) } R diff -Nru r-cran-lavaan-0.5.22/R/lav_samplestats_gamma.R r-cran-lavaan-0.5.23.1097/R/lav_samplestats_gamma.R --- r-cran-lavaan-0.5.22/R/lav_samplestats_gamma.R 2016-01-28 16:17:22.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_samplestats_gamma.R 2017-01-31 18:48:02.000000000 +0000 @@ -1,6 +1,11 @@ # YR 21 March 2015 -# new approach to compute 'Gamma': the asymptotic variance matrix of the +# new approach to compute 'Gamma': the asymptotic variance matrix of +# sqrt{N} times the # observed sample statistics (means + varcov) +# +# Gamma = N x ACOV[ ybar, vech(S) ] +# = NACOV[ ybar, vech(S) ] +# # - one single function for mean + cov # - handle 'fixed.x' exogenous covariates # - YR 3 Dec 2015: allow for conditional.x = TRUE @@ -33,8 +38,9 @@ } lavdata <- lavData(data = object, group = group, ov.names = NAMES, ordered = NULL, - ov.names.x = ov.names.x, warn = FALSE, - missing = missing) + ov.names.x = ov.names.x, + lavoptions = list(warn = FALSE, + missing = missing)) } else { stop("lavaan ERROR: lavGamma can not handle objects of class ", paste(class(object), collapse= " ")) diff -Nru r-cran-lavaan-0.5.22/R/lav_samplestats.R r-cran-lavaan-0.5.23.1097/R/lav_samplestats.R --- r-cran-lavaan-0.5.22/R/lav_samplestats.R 2016-08-26 14:10:01.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_samplestats.R 2017-02-24 12:31:02.000000000 +0000 @@ -21,6 +21,8 @@ group.w.free = FALSE, WLS.V = NULL, NACOV = NULL, + se = "standard", + information = "expected", ridge = 1e-5, optim.method = "nlminb", zero.add = c(0.5, 0.0), @@ -36,6 +38,7 @@ if(!is.null(lavdata)) { X <- lavdata@X; Mp <- lavdata@Mp ngroups <- lavdata@ngroups + nlevels <- lavdata@nlevels nobs <- lavdata@nobs ov.names <- lavdata@ov.names ov.names.x <- lavdata@ov.names.x @@ -46,8 +49,9 @@ X <- DataX eXo <- DataeXo ngroups <- length(X) - Mp <- vector("list", length=ngroups) - nobs <- vector("list", length=ngroups) + nlevels <- 1L # for now + Mp <- vector("list", length = ngroups) + nobs <- vector("list", length = ngroups) for(g in 1:ngroups) { if(missing != "listwise") { Mp[[g]] <- lav_data_missing_patterns(X[[g]], sort.freq = FALSE, @@ -64,48 +68,50 @@ # sample statistics per group # joint 
(y,x) - cov <- vector("list", length=ngroups) - var <- vector("list", length=ngroups) - mean <- vector("list", length=ngroups) - th <- vector("list", length=ngroups) - th.idx <- vector("list", length=ngroups) - th.names <- vector("list", length=ngroups) + cov <- vector("list", length = ngroups) + var <- vector("list", length = ngroups) + mean <- vector("list", length = ngroups) + th <- vector("list", length = ngroups) + th.idx <- vector("list", length = ngroups) + th.names <- vector("list", length = ngroups) # residual (y | x) - res.cov <- vector("list", length=ngroups) - res.var <- vector("list", length=ngroups) - res.th <- vector("list", length=ngroups) - res.th.nox <- vector("list", length=ngroups) - res.slopes <- vector("list", length=ngroups) - res.int <- vector("list", length=ngroups) + res.cov <- vector("list", length = ngroups) + res.var <- vector("list", length = ngroups) + res.th <- vector("list", length = ngroups) + res.th.nox <- vector("list", length = ngroups) + res.slopes <- vector("list", length = ngroups) + res.int <- vector("list", length = ngroups) # fixed.x - mean.x <- vector("list", length=ngroups) - cov.x <- vector("list", length=ngroups) + mean.x <- vector("list", length = ngroups) + cov.x <- vector("list", length = ngroups) # binary/ordinal - bifreq <- vector("list", length=ngroups) + bifreq <- vector("list", length = ngroups) # extra sample statistics per group - icov <- vector("list", length=ngroups) - cov.log.det <- vector("list", length=ngroups) - res.icov <- vector("list", length=ngroups) - res.cov.log.det <- vector("list", length=ngroups) - WLS.obs <- vector("list", length=ngroups) - missing. <- vector("list", length=ngroups) - missing.h1. <- vector("list", length=ngroups) - missing.flag. <- FALSE + icov <- vector("list", length = ngroups) + cov.log.det <- vector("list", length = ngroups) + res.icov <- vector("list", length = ngroups) + res.cov.log.det <- vector("list", length = ngroups) + WLS.obs <- vector("list", length = ngroups) + missing. <- vector("list", length = ngroups) + missing.h1. <- vector("list", length = ngroups) + missing.flag. <- FALSE + zero.cell.tables <- vector("list", length = ngroups) + YLp <- vector("list", length = ngroups) # group weights - group.w <- vector("list", length=ngroups) + group.w <- vector("list", length = ngroups) # convenience? # FIXME! - x.idx <- vector("list", length=ngroups) + x.idx <- vector("list", length = ngroups) - WLS.VD <- vector("list", length=ngroups) + WLS.VD <- vector("list", length = ngroups) if(is.null(WLS.V)) { - WLS.V <- vector("list", length=ngroups) + WLS.V <- vector("list", length = ngroups) WLS.V.user <- FALSE } else { if(!is.list(WLS.V)) { @@ -137,7 +143,7 @@ NACOV.compute <- TRUE if(is.null(NACOV)) { - NACOV <- vector("list", length=ngroups) + NACOV <- vector("list", length = ngroups) NACOV.user <- FALSE } else if(is.logical(NACOV)) { if(!NACOV) { @@ -146,7 +152,7 @@ NACOV.compute <- TRUE } NACOV.user <- FALSE - NACOV <- vector("list", length=ngroups) + NACOV <- vector("list", length = ngroups) } else { NACOV.compute <- FALSE if(!is.list(NACOV)) { @@ -165,15 +171,16 @@ # FIXME: check dimension of NACOV!! 
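# --- editor's sketch (not part of the upstream patch) -----------------------
# The NACOV (= Gamma) handled here is N times the asymptotic covariance of
# the sample statistics (ybar, vech(S)), as documented in
# lav_samplestats_gamma.R above.  A naive, self-contained estimate for the
# continuous single-group case, for illustration only (gamma_naive is a
# hypothetical helper, not lavaan API):
gamma_naive <- function(Y) {
  Y   <- as.matrix(Y); n <- nrow(Y); p <- ncol(Y)
  Yc  <- sweep(Y, 2, colMeans(Y))                  # center the data
  idx <- which(lower.tri(diag(p), diag = TRUE))    # vech positions
  # per-observation scores for (ybar, vech(S))
  sc  <- t(apply(Yc, 1, function(z) c(z, tcrossprod(z)[idx])))
  crossprod(sweep(sc, 2, colMeans(sc))) / n        # N x ACOV estimate
}
gamma_naive(matrix(rnorm(200), 50, 4))             # 14 x 14 = (4 + 4*5/2)^2
# ---------------------------------------------------------------------------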
} + # compute some sample statistics per group for(g in 1:ngroups) { # check nobs if(nobs[[g]] < 2L) { if(nobs[[g]] == 0L) { - stop("lavaan ERROR: data contains no observations", + stop("lavaan ERROR: data contains no observations", ifelse(ngroups > 1L, paste(" in group ", g, sep=""), "")) } else { - stop("lavaan ERROR: data contains only a single observation", + stop("lavaan ERROR: data contains only a single observation", ifelse(ngroups > 1L, paste(" in group ", g, sep=""), "")) } } @@ -211,7 +218,14 @@ ov.levels <- DataOv$nlev[ match(ov.names[[g]], DataOv$name) ] CAT <- list() if("ordered" %in% ov.types) { - categorical <- TRUE + if(nlevels > 1L) { + stop("lavaan ERROR: multilevel + categorical not supported yet.") + } else { + categorical <- TRUE + } + } + + if(categorical) { if(estimator %in% c("ML","REML","PML","FML","MML","none")) { WLS.W <- FALSE } else { @@ -220,8 +234,8 @@ if(verbose) { cat("Estimating sample thresholds and correlations ... ") } - CAT <- muthen1984(Data=X[[g]], - ov.names=ov.names[[g]], + CAT <- muthen1984(Data=X[[g]], + ov.names=ov.names[[g]], ov.types=ov.types, ov.levels=ov.levels, ov.names.x=ov.names.x[[g]], @@ -232,8 +246,11 @@ optim.method = optim.method, zero.add = zero.add, zero.keep.margins = zero.keep.margins, - zero.cell.warn = zero.cell.warn, + zero.cell.warn = FALSE, + zero.cell.tables = TRUE, verbose=debug) + # empty cell tables + zero.cell.tables[[g]] <- CAT$zero.cell.tables if(verbose) cat("done\n") } @@ -279,7 +296,7 @@ } else { # var/cov var[[g]] <- unlist(CAT$VAR) - cov[[g]] <- unname(CAT$COV) + cov[[g]] <- unname(CAT$COV) # th also contains the means of numeric variables th[[g]] <- unlist(CAT$TH) @@ -294,8 +311,24 @@ } } - - } else { # continuous case + + } else if(nlevels > 1L) { # continuous, multilevel setting + + # overwrite later with within cov? -- used for starting values + cov[[g]] <- stats::cov(X[[g]], use = "pairwise") + var[[g]] <- diag(cov[[g]]) + # rescale cov by (N-1)/N? (only COV!) + if(rescale) { + # we 'transform' the sample cov (divided by n-1) + # to a sample cov divided by 'n' + cov[[g]] <- (nobs[[g]]-1)/nobs[[g]] * cov[[g]] + } + mean[[g]] <- colMeans(X[[g]], na.rm = TRUE) + + #YLp[[g]] <- lav_samplestats_cluster_patterns(Y = X[[g]], + # Lp = Lp[[g]]) + + } else { # continuous, single-level case if(conditional.x) { # residual covariances! @@ -303,7 +336,7 @@ # FIXME: how to handle missing data here? Y <- cbind(X[[g]], eXo[[g]]) COV <- unname( stats::cov(Y, use="pairwise")) - MEAN <- unname( apply(Y, 2, base::mean, na.rm=TRUE) ) + MEAN <- unname( colMeans(Y, na.rm=TRUE) ) var[[g]] <- diag(COV) cov[[g]] <- COV # rescale cov by (N-1)/N? (only COV!) @@ -320,7 +353,7 @@ C <- COV[ x.idx[[g]], x.idx[[g]], drop=FALSE] # FIXME: make robust against singular C!!! res.cov[[g]] <- A - B %*% solve(C) %*% t(B) - res.var[[g]] <- diag( cov[[g]] ) + res.var[[g]] <- diag( cov[[g]] ) MY <- MEAN[-x.idx[[g]]]; MX <- MEAN[x.idx[[g]]] @@ -332,6 +365,23 @@ res.int[[g]] <- COEF[1,] # intercepts res.slopes[[g]] <- t(COEF[-1,,drop = FALSE]) # slopes + } else if(missing == "two.stage" || + missing == "robust.two.stage") { + stopifnot(!conditional.x) # for now + missing.flag. <- FALSE #!!! 
just use sample statistics + missing.[[g]] <- + lav_samplestats_missing_patterns(Y = X[[g]], + Mp = Mp[[g]]) + out <- lav_mvnorm_missing_h1_estimate_moments(Y = X[[g]], + Mp = Mp[[g]], Yp = missing.[[g]], verbose = verbose) + missing.h1.[[g]]$sigma <- out$Sigma + missing.h1.[[g]]$mu <- out$Mu + missing.h1.[[g]]$h1 <- out$fx + + # here, sample statistics == EM estimates + cov[[g]] <- missing.h1.[[g]]$sigma + var[[g]] <- diag(cov[[g]]) + mean[[g]] <- missing.h1.[[g]]$mu } else { cov[[g]] <- stats::cov(X[[g]], use = "pairwise") var[[g]] <- diag(cov[[g]]) @@ -341,7 +391,7 @@ # to a sample cov divided by 'n' cov[[g]] <- (nobs[[g]]-1)/nobs[[g]] * cov[[g]] } - mean[[g]] <- apply(X[[g]], 2, base::mean, na.rm=TRUE) + mean[[g]] <- colMeans(X[[g]], na.rm=TRUE) } @@ -366,18 +416,27 @@ # WLS.obs - WLS.obs[[g]] <- lav_samplestats_wls_obs(mean.g = mean[[g]], - cov.g = cov[[g]], var.g = var[[g]], th.g = th[[g]], - th.idx.g = th.idx[[g]], res.int.g = res.int[[g]], - res.cov.g = res.cov[[g]], res.var.g = res.var[[g]], - res.th.g = res.th[[g]], res.slopes.g = res.slopes[[g]], - group.w.g = group.w[[g]], - categorical = categorical, conditional.x = conditional.x, - meanstructure = meanstructure, slopestructure = conditional.x, - group.w.free = group.w.free) + if(nlevels == 1L) { + WLS.obs[[g]] <- lav_samplestats_wls_obs(mean.g = mean[[g]], + cov.g = cov[[g]], var.g = var[[g]], th.g = th[[g]], + th.idx.g = th.idx[[g]], res.int.g = res.int[[g]], + res.cov.g = res.cov[[g]], res.var.g = res.var[[g]], + res.th.g = res.th[[g]], res.slopes.g = res.slopes[[g]], + group.w.g = group.w[[g]], + categorical = categorical, conditional.x = conditional.x, + meanstructure = meanstructure, slopestructure = conditional.x, + group.w.free = group.w.free) + + } # if missing = "fiml", sample statistics per pattern if(missing == "ml") { + if(nlevels > 1L) { + stop("lavaan ERROR: multilevel + fiml not supported yet") + } + if(conditional.x) { + stop("lavaan ERROR: multilevel + conditional.x not supported yet") + } stopifnot(!conditional.x) # for now missing.flag. <- TRUE missing.[[g]] <- @@ -387,17 +446,16 @@ #cat("missing.h1 = "); print(missing.h1); cat("\n") if(missing.h1) { # estimate moments unrestricted model - out <- estimate.moments.EM(Y = X[[g]], Mp = Mp[[g]], - Yp = missing.[[g]], - verbose = verbose) - missing.h1.[[g]]$sigma <- out$sigma - missing.h1.[[g]]$mu <- out$mu + out <- lav_mvnorm_missing_h1_estimate_moments(Y = X[[g]], + Mp = Mp[[g]], Yp = missing.[[g]], verbose = verbose) + missing.h1.[[g]]$sigma <- out$Sigma + missing.h1.[[g]]$mu <- out$Mu missing.h1.[[g]]$h1 <- out$fx } } # NACOV (=GAMMA) - if(!NACOV.user) { + if(!NACOV.user && nlevels == 1L) { if(estimator == "ML" && !missing.flag. 
&& NACOV.compute) { if(conditional.x) { Y <- Y @@ -432,8 +490,8 @@ } else { txt <- "\n" } - warning("lavaan WARNING: number of observations (", - nrow(X[[g]]), ") too small to compute Gamma", + warning("lavaan WARNING: number of observations (", + nrow(X[[g]]), ") too small to compute Gamma", txt) } if(conditional.x) { @@ -441,7 +499,7 @@ } else { Y <- X[[g]] } - NACOV[[g]] <- + NACOV[[g]] <- lav_samplestats_Gamma(Y = Y, x.idx = x.idx[[g]], fixed.x = fixed.x, @@ -458,7 +516,7 @@ } # WLS.V - if(!WLS.V.user) { + if(!WLS.V.user && nlevels == 1L) { if(estimator == "GLS") { # Note: we need the 'original' COV/MEAN/ICOV # sample statistics; not the 'residual' version @@ -477,8 +535,12 @@ WLS.V[[g]][1:nvar, 1:nvar] <- WLS.V[[g]][1:nvar, 1:nvar, drop = FALSE] * nobs[[g]]/(nobs[[g]]-1) } + + } else if(estimator == "ML") { # no WLS.V here, since function of model-implied moments + + } else if(estimator %in% c("WLS","DWLS","ULS")) { if(!categorical) { if(estimator == "WLS") { @@ -510,7 +572,7 @@ } else { idacov <- 1/dacov } - WLS.V[[g]] <- diag(idacov, nrow=NROW(NACOV[[g]]), + WLS.V[[g]] <- diag(idacov, nrow=NROW(NACOV[[g]]), ncol=NCOL(NACOV[[g]])) WLS.VD[[g]] <- idacov } else if(estimator == "ULS") { @@ -558,7 +620,6 @@ # construct SampleStats object lavSampleStats <- new("lavSampleStats", - CAT = CAT, # debug only # sample moments th = th, th.idx = th.idx, @@ -598,12 +659,25 @@ NACOV = NACOV, NACOV.user = NACOV.user, + # cluster/levels + YLp = YLp, + # missingness missing.flag = missing.flag., missing = missing., - missing.h1 = missing.h1. + missing.h1 = missing.h1., + zero.cell.tables = zero.cell.tables ) + # just a SINGLE warning if we have empty cells + if(categorical && zero.cell.warn && + any(sapply(zero.cell.tables, nrow) > 0L)) { + nempty <- sum(sapply(zero.cell.tables, nrow)) + warning("lavaan WARNING: ", nempty, + " bivariate tables have empty cells; to see them, use:\n", + " lavInspect(fit, \"zero.cell.tables\")") + } + lavSampleStats } @@ -639,40 +713,42 @@ # sample statistics per group - cov <- vector("list", length=ngroups) - var <- vector("list", length=ngroups) - mean <- vector("list", length=ngroups) - th <- vector("list", length=ngroups) - th.idx <- vector("list", length=ngroups) - th.names <- vector("list", length=ngroups) + cov <- vector("list", length = ngroups) + var <- vector("list", length = ngroups) + mean <- vector("list", length = ngroups) + th <- vector("list", length = ngroups) + th.idx <- vector("list", length = ngroups) + th.names <- vector("list", length = ngroups) # residual (y | x) - res.cov <- vector("list", length=ngroups) - res.var <- vector("list", length=ngroups) - res.th <- vector("list", length=ngroups) - res.th.nox <- vector("list", length=ngroups) - res.slopes <- vector("list", length=ngroups) - res.int <- vector("list", length=ngroups) + res.cov <- vector("list", length = ngroups) + res.var <- vector("list", length = ngroups) + res.th <- vector("list", length = ngroups) + res.th.nox <- vector("list", length = ngroups) + res.slopes <- vector("list", length = ngroups) + res.int <- vector("list", length = ngroups) # fixed.x - mean.x <- vector("list", length=ngroups) - cov.x <- vector("list", length=ngroups) + mean.x <- vector("list", length = ngroups) + cov.x <- vector("list", length = ngroups) - bifreq <- vector("list", length=ngroups) + bifreq <- vector("list", length = ngroups) # extra sample statistics per group - icov <- vector("list", length=ngroups) - cov.log.det <- vector("list", length=ngroups) - res.icov <- vector("list", length=ngroups) - 
res.cov.log.det <- vector("list", length=ngroups) - WLS.obs <- vector("list", length=ngroups) - missing. <- vector("list", length=ngroups) - missing.h1. <- vector("list", length=ngroups) + icov <- vector("list", length = ngroups) + cov.log.det <- vector("list", length = ngroups) + res.icov <- vector("list", length = ngroups) + res.cov.log.det <- vector("list", length = ngroups) + WLS.obs <- vector("list", length = ngroups) + missing. <- vector("list", length = ngroups) + missing.h1. <- vector("list", length = ngroups) missing.flag. <- FALSE + zero.cell.tables <- vector("list", length = ngroups) + YLp <- vector("list", length = ngroups) # group weights - group.w <- vector("list", length=ngroups) - x.idx <- vector("list", length=ngroups) + group.w <- vector("list", length = ngroups) + x.idx <- vector("list", length = ngroups) # for now, we do NOT support categorical data (using moments only), # fixed.x, and conditional.x @@ -681,9 +757,9 @@ conditional.x <- FALSE - WLS.VD <- vector("list", length=ngroups) + WLS.VD <- vector("list", length = ngroups) if(is.null(WLS.V)) { - WLS.V <- vector("list", length=ngroups) + WLS.V <- vector("list", length = ngroups) WLS.V.user <- FALSE } else { if(!is.list(WLS.V)) { @@ -714,7 +790,7 @@ } if(is.null(NACOV)) { - NACOV <- vector("list", length=ngroups) + NACOV <- vector("list", length = ngroups) NACOV.user <- FALSE } else { if(!is.list(NACOV)) { @@ -869,7 +945,6 @@ # construct SampleStats object lavSampleStats <- new("lavSampleStats", - # sample moments th = th, th.idx = th.idx, @@ -912,11 +987,14 @@ NACOV = NACOV, NACOV.user = NACOV.user, + # cluster/level + YLp = YLp, + # missingness missing.flag = missing.flag., missing = missing., - missing.h1 = missing.h1. - + missing.h1 = missing.h1., + zero.cell.tables = zero.cell.tables ) lavSampleStats @@ -959,3 +1037,17 @@ Yp } +# compute sample statistics, per cluster +lav_samplestats_cluster_patterns <- function(Y = NULL, Lp = NULL) { + + # coerce Y to matrix + Y <- as.matrix(Y) + + if(is.null(Lp)) { + stop("lavaan ERROR: Lp is NULL") + } + + YLp <- vector("list", length = length(Lp$cluster)) + + YLp +} diff -Nru r-cran-lavaan-0.5.22/R/lav_samplestats_step2.R r-cran-lavaan-0.5.23.1097/R/lav_samplestats_step2.R --- r-cran-lavaan-0.5.22/R/lav_samplestats_step2.R 2015-11-26 09:49:17.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_samplestats_step2.R 2017-02-07 15:58:17.000000000 +0000 @@ -1,16 +1,24 @@ -lav_samplestats_step2 <- function(UNI = NULL, - ov.names = NULL, # error message only +lav_samplestats_step2 <- function(UNI = NULL, + ov.names = NULL, # error message only # polychoric and empty cells - zero.add = c(0.5, 0.0), + zero.add = c(0.5, 0.0), zero.keep.margins = TRUE, - zero.cell.warn = TRUE, + zero.cell.warn = FALSE, - optim.method = "nlminb") { + # keep track of tables with zero cells? 
+ zero.cell.tables = TRUE, + + optim.method = "nlminb") { nvar <- length(UNI) COR <- diag(nvar) + if(zero.cell.tables) { + zero.var1 <- character(0L) + zero.var2 <- character(0L) + } + # one-by-one (for now) for(j in seq_len(nvar-1L)) { for(i in (j+1L):nvar) { @@ -45,8 +53,16 @@ zero.add = zero.add, zero.keep.margins = zero.keep.margins, zero.cell.warn = zero.cell.warn, + zero.cell.flag = zero.cell.tables, Y1.name = ov.names[i], Y2.name = ov.names[j]) + if(zero.cell.tables) { + if(attr(out, "zero.cell.flag")) { + zero.var1 <- c(zero.var1, ov.names[j]) + zero.var2 <- c(zero.var2, ov.names[i]) + } + attr(out, "zero.cell.flag") <- NULL + } COR[i,j] <- COR[j,i] <- out } # check for near 1.0 correlations @@ -56,5 +72,11 @@ } } + # keep track of tables with zero cells + if(zero.cell.tables) { + zero.cell.tables <- cbind(zero.var1, zero.var2) + attr(COR, "zero.cell.tables") <- zero.cell.tables + } + COR } diff -Nru r-cran-lavaan-0.5.22/R/lav_simulate.R r-cran-lavaan-0.5.23.1097/R/lav_simulate.R --- r-cran-lavaan-0.5.22/R/lav_simulate.R 2015-11-24 16:36:27.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_simulate.R 2017-02-21 11:05:32.000000000 +0000 @@ -26,7 +26,7 @@ # data properties sample.nobs = 500L, ov.var = NULL, - group.label = paste("G", 1:ngroups, sep=""), + group.label = paste("G", 1:nblocks, sep=""), skewness = NULL, kurtosis = NULL, @@ -41,9 +41,9 @@ ) { if(!is.null(seed)) set.seed(seed) - if(!exists(".Random.seed", envir = .GlobalEnv)) - runif(1) # initialize the RNG if necessary - RNGstate <- .Random.seed + #if(!exists(".Random.seed", envir = .GlobalEnv)) + # runif(1) # initialize the RNG if necessary + #RNGstate <- .Random.seed # lavaanify if(is.list(model)) { @@ -124,7 +124,7 @@ # so there is no need to make a distinction between numeric/ordered # here?? lav2 <- lav - ngroups <- max(lav$group) + nblocks <- lav_partable_nblocks(lav) ov.names <- vnames(lav, "ov") ov.nox <- vnames(lav, "ov.nox") lv.names <- vnames(lav, "lv") @@ -137,7 +137,7 @@ warning("lavaan WARNING: if residual variances are specified, please use standardized=FALSE") } lav2$ustart[c(ov.var.idx,lv.var.idx)] <- 0.0 - fit <- lavaan(model=lav2, sample.nobs=sample.nobs, ...) + fit <- lavaan(model=lav2, sample.nobs=sample.nobs, ...) 
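# --- editor's sketch (not part of the upstream patch) -----------------------
# Public entry point for this file: simulateData().  The skewness/kurtosis
# arguments are handled by the Vale-Maurelli (1983) transform seen below.
# Minimal runnable example, using only arguments visible in this diff:
library(lavaan)
model <- 'f =~ 0.7*x1 + 0.7*x2 + 0.7*x3'
d <- simulateData(model, sample.nobs = 200,
                  skewness = 1, kurtosis = 3, seed = 1234)
round(colMeans(d), 2)
# ---------------------------------------------------------------------------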
Sigma.hat <- computeSigmaHat(lavmodel = fit@Model) ETA <- computeVETA(lavmodel = fit@Model, lavsamplestats = fit@SampleStats) @@ -148,20 +148,20 @@ } # standardized OV - for(g in 1:ngroups) { - var.group <- which(lav$op == "~~" & lav$lhs %in% ov.nox & - lav$rhs == lav$lhs & lav$group == g) + for(g in 1:nblocks) { + var.block <- which(lav$op == "~~" & lav$lhs %in% ov.nox & + lav$rhs == lav$lhs & lav$block == g) ov.idx <- match(ov.nox, ov.names) - lav$ustart[var.group] <- 1 - diag(Sigma.hat[[g]])[ov.idx] + lav$ustart[var.block] <- 1 - diag(Sigma.hat[[g]])[ov.idx] } # standardize LV if(length(lv.y) > 0L) { - for(g in 1:ngroups) { - var.group <- which(lav$op == "~~" & lav$lhs %in% lv.y & - lav$rhs == lav$lhs & lav$group == g) + for(g in 1:nblocks) { + var.block <- which(lav$op == "~~" & lav$lhs %in% lv.y & + lav$rhs == lav$lhs & lav$block == g) eta.idx <- match(lv.y, lv.names) - lav$ustart[var.group] <- 1 - diag(ETA[[g]])[eta.idx] + lav$ustart[var.block] <- 1 - diag(ETA[[g]])[eta.idx] } } @@ -205,14 +205,14 @@ if(exists("TH")) print(TH) } - # ngroups - ngroups <- length(sample.nobs) + # nblocks + nblocks <- length(sample.nobs) # prepare - X <- vector("list", length=ngroups) - out <- vector("list", length=ngroups) + X <- vector("list", length=nblocks) + out <- vector("list", length=nblocks) - for(g in 1:ngroups) { + for(g in 1:nblocks) { COV <- Sigma.hat[[g]] # if empirical = TRUE, rescale by N/(N-1), so that estimator=ML @@ -231,7 +231,7 @@ # first generate Z Z <- ValeMaurelli1983(n = sample.nobs[g], COR = cov2cor(COV), - skewness = skewness, # FIXME: per group? + skewness = skewness, # FIXME: per block? kurtosis = kurtosis, debug = debug) # rescale @@ -250,13 +250,13 @@ } # any categorical variables? - ov.ord <- vnames(lav, type="ov.ord", group=g) + ov.ord <- vnames(lav, type="ov.ord", block = g) if(length(ov.ord) > 0L) { - ov.names <- vnames(lav, type="ov", group=g) + ov.names <- vnames(lav, type="ov", block = g) # use thresholds to cut for(o in ov.ord) { o.idx <- which(o == ov.names) - th.idx <- which(lav$op == "|" & lav$lhs == o & lav$group == g) + th.idx <- which(lav$op == "|" & lav$lhs == o & lav$block == g) th.val <- c(-Inf,sort(lav$ustart[th.idx]),+Inf) X[[g]][,o.idx] <- as.integer(cut(X[[g]][,o.idx], th.val)) } @@ -266,7 +266,7 @@ } if(return.type == "matrix") { - if(ngroups == 1L) { + if(nblocks == 1L) { return(X[[1L]]) } else { return(X) @@ -276,14 +276,14 @@ Data <- X[[1L]] # if multiple groups, add group column - if(ngroups > 1L) { - for(g in 2:ngroups) { + if(nblocks > 1L) { + for(g in 2:nblocks) { Data <- rbind(Data, X[[g]]) } - Data$group <- rep(1:ngroups, times=sample.nobs) + Data$group <- rep(1:nblocks, times=sample.nobs) } - var.names <- vnames(fit@ParTable, type="ov", group=1L) - if(ngroups > 1L) var.names <- c(var.names, "group") + var.names <- vnames(fit@ParTable, type="ov", block=1L) + if(nblocks > 1L) var.names <- c(var.names, "group") names(Data) <- var.names if(return.fit) { attr(Data, "fit") <- fit @@ -291,7 +291,7 @@ return(Data) } else if (return.type == "cov") { - if(ngroups == 1L) { + if(nblocks == 1L) { return(cov(X[[1L]])) } else { cov.list <- lapply(X, cov) diff -Nru r-cran-lavaan-0.5.22/R/lav_standardize.R r-cran-lavaan-0.5.23.1097/R/lav_standardize.R --- r-cran-lavaan-0.5.22/R/lav_standardize.R 2016-04-26 18:27:55.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_standardize.R 2017-02-21 10:02:14.000000000 +0000 @@ -6,8 +6,7 @@ est[free.idx] <- x # take care of setResidualElements... 
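# --- editor's note (not part of the upstream patch): a sketch of the
# --- standardization identities implemented by the standardize.* functions
# --- below, for a loading  y = lambda*eta + eps :
#       std.lv  : lambda' = lambda * sd(eta)
#       std.all : lambda' = lambda * sd(eta) / sd(y)
#     regressions scale by sd(rhs)/sd(lhs); (co)variances are divided by the
#     product of the two relevant standard deviations.
# ---------------------------------------------------------------------------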
- lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x, - estimator = lavobject@Options$estimator) + lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x) GLIST <- lavmodel@GLIST standardize.est.lv(lavobject = lavobject, partable = partable, est = est, @@ -22,8 +21,7 @@ est[free.idx] <- x # take care of setResidualElements... - lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x, - estimator = lavobject@Options$estimator) + lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x) GLIST <- lavmodel@GLIST standardize.est.all(lavobject = lavobject, partable = partable, est = est, @@ -38,8 +36,7 @@ est[free.idx] <- x # take care of setResidualElements... - lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x, - estimator = lavobject@Options$estimator) + lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x) GLIST <- lavmodel@GLIST standardize.est.all.nox(lavobject = lavobject, partable = partable, est = est, @@ -59,6 +56,11 @@ if(is.null(partable)) partable <- lavobject@ParTable if(is.null(est)) est <- lav_object_inspect_est(lavobject) if(is.null(GLIST)) GLIST <- lavobject@Model@GLIST + if("SampleStats" %in% slotNames(lavobject)) { + lavsamplestats = lavobject@SampleStats + } else { + lavsamplestats = NULL + } out <- est; N <- length(est) stopifnot(N == length(partable$lhs)) @@ -68,19 +70,19 @@ # compute ETA LV.ETA <- computeVETA(lavmodel = lavobject@Model, GLIST = GLIST, - lavsamplestats = lavobject@SampleStats) + lavsamplestats = lavsamplestats) - for(g in 1:lavobject@Data@ngroups) { + for(g in 1:lavobject@Model@nblocks) { - ov.names <- vnames(lavobject@ParTable, "ov", group=g) # not user, + ov.names <- vnames(lavobject@ParTable, "ov", block=g) # not user, # which may be incomplete - lv.names <- vnames(lavobject@ParTable, "lv", group=g) + lv.names <- vnames(lavobject@ParTable, "lv", block=g) - # shortcut: no latents in this group, nothing to do + # shortcut: no latents in this block, nothing to do if(length(lv.names) == 0L) next - # which mm belong to group g? + # which mm belong to block g? mm.in.group <- 1:nmat[g] + cumsum(c(0,nmat))[g] MLIST <- GLIST[ mm.in.group ] @@ -89,34 +91,34 @@ # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * ETA[ match(partable$lhs[idx], lv.names) ] # 1b. "=~" regular higher-order lv indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% ov.names) & - partable$group == g) + partable$block == g) out[idx] <- ( out[idx] * ETA[ match(partable$lhs[idx], lv.names) ] / ETA[ match(partable$rhs[idx], lv.names) ] ) # 1c. "=~" indicators that are both in ov and lv #idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 2. "~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% lv.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / ETA[ match(partable$lhs[idx], lv.names) ] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% lv.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * ETA[ match(partable$rhs[idx], lv.names) ] # 3a. "~~" ov #idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & - # partable$group == g) + # partable$block == g) # 3b. 
"~~" lv # ATTENTION: in Mplus 4.1, the off-diagonal residual covariances @@ -129,7 +131,7 @@ # variances rv.idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & partable$lhs == partable$rhs & - partable$group == g) + partable$block == g) out[rv.idx] <- ( out[rv.idx] / ETA[ match(partable$lhs[rv.idx], lv.names) ] / ETA[ match(partable$rhs[rv.idx], lv.names) ] ) @@ -151,7 +153,7 @@ idx.lhs <- which(partable$op == "~~" & partable$lhs %in% lv.names & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx.lhs) > 0L) { if(cov.std == FALSE) { out[idx.lhs] <- @@ -166,7 +168,7 @@ idx.rhs <- which(partable$op == "~~" & partable$rhs %in% lv.names & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx.rhs) > 0L) { if(cov.std == FALSE) { out[idx.rhs] <- @@ -180,11 +182,11 @@ # 4a. "~1" ov #idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & - # partable$group == g) + # partable$block == g) # 4b. "~1" lv idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / ETA[ match(partable$lhs[idx], lv.names) ] } @@ -222,31 +224,36 @@ GLIST = GLIST, cov.std = cov.std) } if(is.null(GLIST)) GLIST <- lavobject@Model@GLIST + if("SampleStats" %in% slotNames(lavobject)) { + lavsamplestats = lavobject@SampleStats + } else { + lavsamplestats = NULL + } out <- est.std; N <- length(est.std) stopifnot(N == length(partable$lhs)) VY <- computeVY(lavmodel = lavobject@Model, GLIST = GLIST, - lavsamplestats = lavobject@SampleStats, + lavsamplestats = lavsamplestats, diagonal.only = TRUE) - for(g in 1:lavobject@Data@ngroups) { + for(g in 1:lavobject@Model@nblocks) { - ov.names <- vnames(lavobject@ParTable, "ov", group=g) # not user - lv.names <- vnames(lavobject@ParTable, "lv", group=g) + ov.names <- vnames(lavobject@ParTable, "ov", block = g) # not user + lv.names <- vnames(lavobject@ParTable, "lv", block = g) OV <- sqrt(VY[[g]]) if(lavobject@Model@conditional.x) { # extend OV with ov.names.x - ov.names.x <- vnames(lavobject@ParTable, "ov.x", group=g) + ov.names.x <- vnames(lavobject@ParTable, "ov.x", block = g) ov.names <- c(ov.names, ov.names.x) OV <- c(OV, sqrt(diag(lavobject@SampleStats@cov.x[[g]]))) } # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$rhs[idx], ov.names) ] # 1b. "=~" regular higher-order lv indicators @@ -254,17 +261,17 @@ # 1c. "=~" indicators that are both in ov and lv #idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 2. "~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% ov.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$lhs[idx], ov.names) ] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% ov.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * OV[ match(partable$rhs[idx], ov.names) ] # 3a. 
"~~" ov @@ -278,7 +285,7 @@ # variances rv.idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs == partable$rhs & - partable$group == g) + partable$block == g) out[rv.idx] <- ( out[rv.idx] / OV[ match(partable$lhs[rv.idx], ov.names) ] / OV[ match(partable$rhs[rv.idx], ov.names) ] ) @@ -300,7 +307,7 @@ idx.lhs <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx.lhs) > 0L) { if(cov.std == FALSE) { out[idx.lhs] <- @@ -315,7 +322,7 @@ idx.rhs <- which(partable$op == "~~" & !(partable$rhs %in% lv.names) & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx.rhs) > 0L) { if(cov.std == FALSE) { out[idx.rhs] <- @@ -328,25 +335,25 @@ # 3b. "~~" lv #idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 4a. "~1" ov idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$lhs[idx], ov.names) ] # 4b. "~1" lv #idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 4c. "|" thresholds idx <- which(partable$op == "|" & !(partable$lhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$lhs[idx], ov.names) ] # 4d. "~*~" scales idx <- which(partable$op == "~*~" & !(partable$lhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- 1.0 } @@ -386,33 +393,38 @@ GLIST = GLIST, cov.std = cov.std) } if(is.null(GLIST)) GLIST <- lavobject@Model@GLIST + if("SampleStats" %in% slotNames(lavobject)) { + lavsamplestats = lavobject@SampleStats + } else { + lavsamplestats = NULL + } out <- est.std; N <- length(est.std) stopifnot(N == length(partable$lhs)) VY <- computeVY(lavmodel = lavobject@Model, GLIST = GLIST, - lavsamplestats = lavobject@SampleStats, + lavsamplestats = lavsamplestats, diagonal.only = TRUE) - for(g in 1:lavobject@Data@ngroups) { + for(g in 1:lavobject@Model@nblocks) { - ov.names <- vnames(lavobject@ParTable, "ov", group=g) # not user - ov.names.x <- vnames(lavobject@ParTable, "ov.x", group=g) - ov.names.nox <- vnames(lavobject@ParTable, "ov.nox", group=g) - lv.names <- vnames(lavobject@ParTable, "lv", group=g) + ov.names <- vnames(lavobject@ParTable, "ov", block = g) + ov.names.x <- vnames(lavobject@ParTable, "ov.x", block = g) + ov.names.nox <- vnames(lavobject@ParTable, "ov.nox", block = g) + lv.names <- vnames(lavobject@ParTable, "lv", block = g) OV <- sqrt(VY[[g]]) if(lavobject@Model@conditional.x) { # extend OV with ov.names.x - ov.names.x <- vnames(lavobject@ParTable, "ov.x", group=g) + ov.names.x <- vnames(lavobject@ParTable, "ov.x", block = g) ov.names <- c(ov.names, ov.names.x) OV <- c(OV, sqrt(diag(lavobject@SampleStats@cov.x[[g]]))) } # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$rhs[idx], ov.names) ] # 1b. "=~" regular higher-order lv indicators @@ -420,17 +432,17 @@ # 1c. "=~" indicators that are both in ov and lv #idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 2. 
"~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% ov.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$lhs[idx], ov.names) ] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% ov.names.nox & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * OV[ match(partable$rhs[idx], ov.names.nox) ] # 3a. "~~" ov @@ -445,7 +457,7 @@ rv.idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & !(partable$lhs %in% ov.names.x) & partable$lhs == partable$rhs & - partable$group == g) + partable$block == g) out[rv.idx] <- ( out[rv.idx] / OV[ match(partable$lhs[rv.idx], ov.names) ] / OV[ match(partable$rhs[rv.idx], ov.names) ] ) @@ -468,7 +480,7 @@ !(partable$lhs %in% lv.names) & !(partable$lhs %in% ov.names.x) & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx.lhs) > 0L) { if(cov.std == FALSE) { out[idx.lhs] <- @@ -484,7 +496,7 @@ !(partable$rhs %in% lv.names) & !(partable$rhs %in% ov.names.x) & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx.rhs) > 0L) { if(cov.std == FALSE) { out[idx.rhs] <- @@ -497,26 +509,26 @@ # 3b. "~~" lv #idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 4a. "~1" ov idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & !(partable$lhs %in% ov.names.x) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$lhs[idx], ov.names) ] # 4b. "~1" lv #idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 4c. "|" thresholds idx <- which(partable$op == "|" & !(partable$lhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$lhs[idx], ov.names) ] # 4d. "~*~" scales idx <- which(partable$op == "~*~" & !(partable$lhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- 1.0 } @@ -550,45 +562,35 @@ if(is.null(partable$ustart)) partable$ustart <- partable$est - # check if group is missing - if(is.null(partable$group)) - partable$group <- rep(1L, length(partable$ustart)) + # check if block is missing + if(is.null(partable$block)) { + partable$block <- rep(1L, length(partable$ustart)) + } stopifnot(!any(is.na(partable$ustart))) est <- out <- partable$ustart N <- length(est) - # ngroup - if(is.null(partable$group)) { - partable$group <- rep(1L, length(partable$lhs)) - ngroups <- 1L - } else { - if(is.character(partable$group)) { - group.label <- unique(partable$group) - group.label <- group.label[ nchar(group.label) > 0L ] - ngroups <- length(group.label) - } else { - ngroups <- max(partable$group) - } - } + # nblocks + nblocks <- lav_partable_nblocks(partable) # if ov.var is NOT a list, make a list if(!is.list(ov.var)) { tmp <- ov.var - ov.var <- vector("list", length=ngroups) - ov.var[1:ngroups] <- list(tmp) + ov.var <- vector("list", length=nblocks) + ov.var[1:nblocks] <- list(tmp) } - for(g in 1:ngroups) { + for(g in 1:nblocks) { - ov.names <- vnames(partable, "ov", group=g) # not user - lv.names <- vnames(partable, "lv", group=g) + ov.names <- vnames(partable, "ov", block = g) # not user + lv.names <- vnames(partable, "lv", block = g) OV <- sqrt(ov.var[[g]]) # 1a. 
"=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * OV[ match(partable$rhs[idx], ov.names) ] # 1b. "=~" regular higher-order lv indicators @@ -596,17 +598,17 @@ # 1c. "=~" indicators that are both in ov and lv #idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 2. "~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% ov.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * OV[ match(partable$lhs[idx], ov.names) ] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% ov.names & - partable$group == g) + partable$block == g) out[idx] <- out[idx] / OV[ match(partable$rhs[idx], ov.names) ] # 3a. "~~" ov @@ -620,14 +622,14 @@ # variances rv.idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs == partable$rhs & - partable$group == g) + partable$block == g) out[rv.idx] <- ( out[rv.idx] * OV[ match(partable$lhs[rv.idx], ov.names) ] * OV[ match(partable$rhs[rv.idx], ov.names) ] ) # covariances idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs != partable$rhs & - partable$group == g) + partable$block == g) if(length(idx) > 0L) { if(cov.std == FALSE) { out[idx] <- ( out[idx] * OV[ match(partable$lhs[idx], ov.names) ] @@ -646,16 +648,16 @@ # 3b. "~~" lv #idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & - # partable$group == g) + # partable$block == g) # 4a. "~1" ov idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & - partable$group == g) + partable$block == g) out[idx] <- out[idx] * OV[ match(partable$lhs[idx], ov.names) ] # 4b. "~1" lv #idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & - # partable$group == g) + # partable$block == g) } diff -Nru r-cran-lavaan-0.5.22/R/lav_start.R r-cran-lavaan-0.5.23.1097/R/lav_start.R --- r-cran-lavaan-0.5.22/R/lav_start.R 2016-09-04 21:51:42.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_start.R 2017-02-21 10:49:22.000000000 +0000 @@ -97,7 +97,7 @@ } # 2. 
residual lv variances for latent variables - lv.names <- vnames(lavpartable, "lv") # all groups + lv.names <- vnames(lavpartable, "lv") # all blocks lv.var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs %in% lv.names & lavpartable$lhs == lavpartable$rhs) @@ -109,31 +109,31 @@ start[delta.idx] <- 1.0 - # group-specific settings - ngroups <- lavsamplestats@ngroups + # block-specific settings + nblocks <- lav_partable_nblocks(lavpartable) - for(g in 1:ngroups) { + for(g in 1:nblocks) { - # info from user model for this group + # info from user model for this block if(conditional.x) { - ov.names <- vnames(lavpartable, "ov.nox", group=g) + ov.names <- vnames(lavpartable, "ov.nox", block = g) } else { - ov.names <- vnames(lavpartable, "ov", group=g) + ov.names <- vnames(lavpartable, "ov", block = g) } if(categorical) { - ov.names.num <- vnames(lavpartable, "ov.num", group=g) - ov.names.ord <- vnames(lavpartable, "ov.ord", group=g) + ov.names.num <- vnames(lavpartable, "ov.num", block = g) + ov.names.ord <- vnames(lavpartable, "ov.ord", block = g) } else { ov.names.num <- ov.names } - lv.names <- vnames(lavpartable, "lv", group=g) - ov.names.x <- vnames(lavpartable, "ov.x", group=g) + lv.names <- vnames(lavpartable, "lv", block = g) + ov.names.x <- vnames(lavpartable, "ov.x", block = g) # g1) factor loadings if(start.initial %in% c("lavaan", "mplus") && model.type %in% c("sem", "cfa") && #!categorical && - sum( lavpartable$ustart[ lavpartable$op == "=~" & lavpartable$group == g], + sum( lavpartable$ustart[ lavpartable$op == "=~" & lavpartable$block == g], na.rm=TRUE) == length(lv.names) ) { # only if all latent variables have a reference item, # we use the fabin3 estimator (2sls) of Hagglund (1982) @@ -142,11 +142,11 @@ # coefficient (y=marker, x=2nd indicator) for(f in lv.names) { free.idx <- which( lavpartable$lhs == f & lavpartable$op == "=~" - & lavpartable$group == g + & lavpartable$block == g & lavpartable$free > 0L) user.idx <- which( lavpartable$lhs == f & lavpartable$op == "=~" - & lavpartable$group == g ) + & lavpartable$block == g ) # no second order if(any(lavpartable$rhs[user.idx] %in% lv.names)) next @@ -194,7 +194,7 @@ if(model.type == "unrestricted") { # fill in 'covariances' from lavsamplestats - cov.idx <- which(lavpartable$group == g & + cov.idx <- which(lavpartable$block == g & lavpartable$op == "~~" & lavpartable$lhs != lavpartable$rhs) lhs.idx <- match(lavpartable$lhs[cov.idx], ov.names) @@ -203,7 +203,7 @@ } # 2g) residual ov variances (including exo, to be overriden) - ov.var.idx <- which(lavpartable$group == g & + ov.var.idx <- which(lavpartable$block == g & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.num & lavpartable$lhs == lavpartable$rhs) @@ -232,7 +232,7 @@ # variances of ordinal variables - set to 1.0 if(categorical) { - ov.var.ord.idx <- which(lavpartable$group == g & + ov.var.ord.idx <- which(lavpartable$block == g & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.ord & lavpartable$lhs == lavpartable$rhs) @@ -240,7 +240,7 @@ } # 3g) intercepts/means - ov.int.idx <- which(lavpartable$group == g & + ov.int.idx <- which(lavpartable$block == g & lavpartable$op == "~1" & lavpartable$lhs %in% ov.names) sample.int.idx <- match(lavpartable$lhs[ov.int.idx], ov.names) @@ -255,14 +255,14 @@ } # 4g) thresholds - th.idx <- which(lavpartable$group == g & lavpartable$op == "|") + th.idx <- which(lavpartable$block == g & lavpartable$op == "|") if(length(th.idx) > 0L) { th.names.lavpartable <- paste(lavpartable$lhs[th.idx], "|", lavpartable$rhs[th.idx], 
sep="") th.names.sample <- lavsamplestats@th.names[[g]][ lavsamplestats@th.idx[[g]] > 0L ] # th.names.sample should identical to - # vnames(lavpartable, "th", group = g) + # vnames(lavpartable, "th", block = g) if(conditional.x) { th.values <- lavsamplestats@res.th[[g]][lavsamplestats@th.idx[[g]] > 0L] @@ -276,7 +276,7 @@ # 5g) exogenous `fixed.x' covariates if(!conditional.x && length(ov.names.x) > 0) { - exo.idx <- which(lavpartable$group == g & + exo.idx <- which(lavpartable$block == g & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.x & lavpartable$rhs %in% ov.names.x) @@ -297,22 +297,22 @@ } # 6b. exogenous lv variances if single indicator -- new in 0.5-21 - lv.x <- vnames(lavpartable, "lv.x", group = g) + lv.x <- vnames(lavpartable, "lv.x", block = g) if(length(lv.x) > 0L) { for(ll in lv.x) { ind.idx <- which(lavpartable$op == "=~" & lavpartable$lhs == ll, - lavpartable$group == g) + lavpartable$block == g) if(length(ind.idx) == 1L) { single.ind <- lavpartable$rhs[ind.idx] single.fvar.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == ll & lavpartable$rhs == ll & - lavpartable$group == g) + lavpartable$block == g) single.var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == single.ind & lavpartable$rhs == single.ind & - lavpartable$group == g) + lavpartable$block == g) # user-defined residual variance single.var <- lavpartable$ustart[single.var.idx] if(is.na(single.var)) { @@ -343,6 +343,7 @@ group.idx <- which(lavpartable$lhs == "group" & lavpartable$op == "%") if(length(group.idx) > 0L) { + ngroups <- length(group.idx) #prop <- rep(1/ngroups, ngroups) # use last group as reference #start[group.idx] <- log(prop/prop[ngroups]) @@ -370,11 +371,11 @@ # we only look at the 'est' column for now if(!is.null(start.user)) { - if(is.null(lavpartable$group)) { - lavpartable$group <- rep(1L, length(lavpartable$lhs)) + if(is.null(lavpartable$block)) { + lavpartable$block <- rep(1L, length(lavpartable$lhs)) } - if(is.null(start.user$group)) { - start.user$group <- rep(1L, length(start.user$lhs)) + if(is.null(start.user$block)) { + start.user$block <- rep(1L, length(start.user$lhs)) } # FIXME: avoid for loop!!! 
@@ -383,12 +384,12 @@ lhs <- lavpartable$lhs[i] op <- lavpartable$op[i] rhs <- lavpartable$rhs[i] - grp <- lavpartable$group[i] + grp <- lavpartable$block[i] start.user.idx <- which(start.user$lhs == lhs & start.user$op == op & start.user$rhs == rhs & - start.user$group == grp) + start.user$block == grp) if(length(start.user.idx) == 1L && is.finite(start.user$est[start.user.idx])) { start[i] <- start.user$est[start.user.idx] @@ -414,17 +415,13 @@ # sanity check: (user-specified) variances smaller than covariances lav_start_check_cov <- function(lavpartable = NULL, start = lavpartable$start) { - if(is.null(lavpartable$group)) { - ngroups <- 1L - } else { - ngroups <- max(lavpartable$group) - } + nblocks <- lav_partable_nblocks(lavpartable) - for(g in 1:ngroups) { + for(g in 1:nblocks) { # collect all non-zero covariances cov.idx <- which(lavpartable$op == "~~" & - lavpartable$group == g & + lavpartable$block == g & lavpartable$lhs != lavpartable$rhs & !lavpartable$exo & start != 0) @@ -439,21 +436,21 @@ var.rhs <- lavpartable$rhs[this.cov.idx] var.lhs.idx <- which(lavpartable$op == "~~" & - lavpartable$group == g & + lavpartable$block == g & lavpartable$lhs == var.lhs & lavpartable$lhs == lavpartable$rhs) var.rhs.idx <- which(lavpartable$op == "~~" & - lavpartable$group == g & + lavpartable$block == g & lavpartable$lhs == var.rhs & lavpartable$lhs == lavpartable$rhs) var.lhs.value <- start[var.lhs.idx] var.rhs.value <- start[var.rhs.idx] - group.txt <- "" - if(ngroups > 1L) { - group.txt <- paste(" [in group ", g, "]", sep = "") + block.txt <- "" + if(nblocks > 1L) { + block.txt <- paste(" [in block ", g, "]", sep = "") } # check for zero variances @@ -465,11 +462,11 @@ } else if(lavpartable$free[this.cov.idx] > 0L) { warning( "lavaan WARNING: non-zero covariance element set to zero, due to fixed-to-zero variances\n", -" variables involved are: ", var.lhs, " ", var.rhs, group.txt) +" variables involved are: ", var.lhs, " ", var.rhs, block.txt) start[this.cov.idx] <- 0 } else { stop("lavaan ERROR: please provide better fixed values for (co)variances;\n", -" variables involved are: ", var.lhs, " ", var.rhs, group.txt) +" variables involved are: ", var.lhs, " ", var.rhs, block.txt) } next } @@ -490,14 +487,14 @@ # force simple values warning( "lavaan WARNING: starting values imply NaN for a correlation value;\n", -" variables involved are: ", var.lhs, " ", var.rhs, group.txt) +" variables involved are: ", var.lhs, " ", var.rhs, block.txt) start[var.lhs.idx] <- 1 start[var.rhs.idx] <- 1 start[this.cov.idx] <- 0 } else if(abs(COR) > 1) { warning( "lavaan WARNING: starting values imply a correlation larger than 1;\n", -" variables involved are: ", var.lhs, " ", var.rhs, group.txt) +" variables involved are: ", var.lhs, " ", var.rhs, block.txt) # three ways to fix it: rescale cov12, var1 or var2 @@ -523,7 +520,7 @@ # nothing? 
abort } else { stop("lavaan ERROR: please provide better fixed values for (co)variances;\n", -" variables involved are: ", var.lhs, " ", var.rhs, group.txt) +" variables involved are: ", var.lhs, " ", var.rhs, block.txt) } } # COR > 1 } # cov.idx diff -Nru r-cran-lavaan-0.5.22/R/lav_tables.R r-cran-lavaan-0.5.23.1097/R/lav_tables.R --- r-cran-lavaan-0.5.22/R/lav_tables.R 2016-09-24 13:16:11.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_tables.R 2017-02-07 16:41:15.000000000 +0000 @@ -114,7 +114,8 @@ } } - if(nrow(out) == 0L) { + if( (is.data.frame(out) && nrow(out) == 0L) || + (is.list(out) && length(out) == 0L)) { # empty table (perhaps no categorical variables) return(invisible(out)) } @@ -1107,7 +1108,8 @@ out } -lav_tables_cells_format <- function(out, lavdata = lavdata) { +lav_tables_cells_format <- function(out, lavdata = lavdata, + drop.list.single.group = FALSE) { OUT <- vector("list", length=lavdata@ngroups) if(is.null(out$group)) { @@ -1146,12 +1148,14 @@ sep="_")) OUT[[g]] <- TMP } - if(lavdata@ngroups > 1L) { - out <- OUT - names(out) <- lavdata@group.label + + if(lavdata@ngroups == 1L && drop.list.single.group) { + OUT <- OUT[[1]] } else { - out <- OUT[[1L]] + if(length(lavdata@group.label) > 0L) { + names(OUT) <- unlist(lavdata@group.label) + } } - out + OUT } diff -Nru r-cran-lavaan-0.5.22/R/lav_test_diff.R r-cran-lavaan-0.5.23.1097/R/lav_test_diff.R --- r-cran-lavaan-0.5.22/R/lav_test_diff.R 2016-07-20 17:25:39.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_test_diff.R 2017-02-21 19:05:00.000000000 +0000 @@ -141,6 +141,8 @@ lav_test_diff_SatorraBentler2010 <- function(m1, m0, H1 = FALSE) { + ### FIXME: check if models are nested at the parameter level!!! + # extract information from m1 and m0 T1 <- m1@test[[1]]$stat r1 <- m1@test[[1]]$df @@ -220,10 +222,10 @@ # FIXME: # - check if H0 does not contain additional parameters...
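# [Editorial sketch -- not part of the upstream diff.] For orientation: the
# 2001 Satorra-Bentler scaled chi-square difference test, which this file
# implements alongside the 2010 variant patched here, combines the naive
# statistics T0 (nested model) and T1, their degrees of freedom r0 and r1,
# and the scaling factors c0 and c1 roughly as follows:
scaled_diff_2001 <- function(T0, T1, r0, r1, c0, c1) {
    cd <- (r0 * c0 - r1 * c1) / (r0 - r1)  # scaling factor of the difference
    Td <- (T0 - T1) / cd                   # scaled difference statistic
    c(T.delta = Td, df.delta = r0 - r1,
      p.value = 1 - pchisq(Td, df = r0 - r1))
}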
+ Options$optim.method = "none" + Options$optim.force.converged = TRUE m10 <- lavaan(model = PT.M1.extended, start = PE.M0.extended, - control=list(optim.method = "none", - optim.force.converged = TRUE) , slotOptions = Options, slotSampleStats = m1@SampleStats, slotData = m1@Data, @@ -329,6 +331,8 @@ p0.free.idx <- which(PT.M0.part1$free > 0) # change 'free' order in m0 + # NOTE: this only works if all the free parameters in h0 are also free + # in h1 (and if not, they will become fixed in h0) PT.M0.part1$free[p0.free.idx] <- PT.M1.part1$free[ PT.M0.part1$id[p1.id][p0.free.idx] ] diff -Nru r-cran-lavaan-0.5.22/R/lav_test.R r-cran-lavaan-0.5.23.1097/R/lav_test.R --- r-cran-lavaan-0.5.22/R/lav_test.R 2016-03-26 19:52:06.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_test.R 2017-02-05 19:08:35.000000000 +0000 @@ -1,5 +1,5 @@ -testStatisticSatorraBentler <- function(lavsamplestats=lavsamplestats, - E.inv, Delta, WLS.V, +testStatisticSatorraBentler <- function(lavsamplestats=lavsamplestats, + E.inv, Delta, WLS.V, Gamma, x.idx=list(integer(0)), test.UGamma.eigvals = FALSE) { @@ -15,7 +15,9 @@ # we write it like this to allow for fixed.x covariates which affect A1 # and B1 - Gamma <- lavsamplestats@NACOV + if(is.null(Gamma)) { + Gamma <- lavsamplestats@NACOV + } nss <- ncol(Gamma[[1]]) ngroups <- lavsamplestats@ngroups @@ -254,8 +256,7 @@ VCOV = NULL, lavcache = NULL, lavdata = NULL, - test.UGamma.eigvals = FALSE, - control = list()) { + test.UGamma.eigvals = FALSE) { mimic <- lavoptions$mimic @@ -426,6 +427,7 @@ E.inv <- attr(VCOV, "E.inv") Delta <- attr(VCOV, "Delta") WLS.V <- attr(VCOV, "WLS.V") + Gamma <- attr(VCOV, "Gamma") # if not present (perhaps se.type="standard" or se.type="none") # we need to compute these again @@ -440,7 +442,7 @@ } else { E <- lav_model_information_expected(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, extra = TRUE) + extra = TRUE) } E.inv <- try(lav_model_information_augment_invert(lavmodel, information = E, inverted = TRUE), silent=TRUE) @@ -487,7 +489,8 @@ testStatisticSatorraBentler(lavsamplestats = lavsamplestats, E.inv = E.inv, Delta = Delta, - WLS.V = WLS.V, + WLS.V = WLS.V, + Gamma = Gamma, x.idx = x.idx) trace.UGamma2 <- attr(trace.UGamma, "trace.UGamma2") # trace.UGamma3 <- attr(trace.UGamma, "trace.UGamma3") @@ -564,7 +567,6 @@ E.inv <- lav_model_information(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = "ML", lavcache = lavcache, information = information, extra = FALSE, @@ -587,7 +589,6 @@ lav_model_information_firstorder(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = estimator, lavcache = lavcache, extra = TRUE, check.pd = FALSE, @@ -687,10 +688,7 @@ verbose = lavoptions$verbose, type = boot.type, FUN = "test", - warn = -1L, - parallel = control$parallel, - ncpus = control$ncpus, - cl = control$cl) + warn = -1L) BOOT.TEST <- drop(BOOT.TEST) } diff -Nru r-cran-lavaan-0.5.22/R/lav_test_satorra_bentler.R r-cran-lavaan-0.5.23.1097/R/lav_test_satorra_bentler.R --- r-cran-lavaan-0.5.22/R/lav_test_satorra_bentler.R 2016-02-07 14:36:21.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_test_satorra_bentler.R 2017-01-25 19:06:30.000000000 +0000 @@ -53,7 +53,7 @@ } else { E <- lav_model_information_expected(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, - estimator = lavoptions$estimator, extra = TRUE) + extra = TRUE) } E.inv <- try(lav_model_information_augment_invert(lavmodel, information = E, inverted
= TRUE), silent=TRUE) diff -Nru r-cran-lavaan-0.5.22/R/lav_test_score.R r-cran-lavaan-0.5.23.1097/R/lav_test_score.R --- r-cran-lavaan-0.5.22/R/lav_test_score.R 2016-07-20 17:26:14.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_test_score.R 2017-02-20 15:57:05.000000000 +0000 @@ -219,7 +219,7 @@ # create epc table for the 'free' parameters LIST <- parTable(object) LIST <- LIST[,c("lhs","op","rhs","group","free","label","plabel")] - if(max(LIST$group) == 1L) { + if(lav_partable_ngroups(LIST) == 1L) { LIST$group <- NULL } nonpar.idx <- which(LIST$op %in% c("==", ":=", "<", ">")) diff -Nru r-cran-lavaan-0.5.22/R/lav_utils.R r-cran-lavaan-0.5.23.1097/R/lav_utils.R --- r-cran-lavaan-0.5.22/R/lav_utils.R 2016-06-06 11:39:34.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/lav_utils.R 2016-10-03 17:44:31.000000000 +0000 @@ -2,6 +2,13 @@ # # initial version: YR 25/03/2009 +# compute log(sum(exp(x))) avoiding under/overflow +# using the identity: log(sum(exp(x))) = a + log(sum(exp(x - a))) +lav_utils_logsumexp <- function(x) { + a <- max(x) + a + log(sum(exp(x - a))) +} + # invert positive definite symmetric matrix (eg cov matrix) # using choleski decomposition diff -Nru r-cran-lavaan-0.5.22/R/xxx_fsr.R r-cran-lavaan-0.5.23.1097/R/xxx_fsr.R --- r-cran-lavaan-0.5.22/R/xxx_fsr.R 2016-07-03 14:41:32.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/xxx_fsr.R 2017-02-18 10:14:07.000000000 +0000 @@ -5,15 +5,22 @@ # - Skrondal & Laake (2001) (regression models only) # - Croon (2002) (general + robust SE) -fsr <- function(model = NULL, data = NULL, cmd = "sem", - fsr.method = "Croon", fs.method = "Bartlett", ...) { +fsr <- function(model = NULL, + data = NULL, + cmd = "sem", + fsr.method = "Croon", + fs.method = "Bartlett", + fs.scores = FALSE, + Gamma.NT = TRUE, + lvinfo = FALSE, + ...) { # we need full data if(is.null(data)) { stop("lavaan ERROR: full data is required for factor score regression") } - # check arguments + # check fsr.method argument fsr.method <- tolower(fsr.method) if(fsr.method == "naive") { # nothing to do @@ -22,40 +29,81 @@ fsr.method <- "skrondal.laake" } else if(fsr.method == "croon") { # nothing to do + } else { + stop("lavaan ERROR: invalid option for argument fsr.method: ", + fsr.method) } + # check fs.method argument fs.method <- tolower(fs.method) - + if(fs.method %in% c("bartlett", "barttlett", "bartlet")) { + fs.method <- "Bartlett" + } else if(fs.method == "regression") { + # nothing to do + } else { + stop("lavaan ERROR: invalid option for argument fs.method: ", + fs.method) + } + # dot dot dot dotdotdot <- list(...) - # check dotdotdot - if(!is.null(dotdotdot$meanstructure)) { - dotdotdot$meanstructure <- NULL + # change 'default' values for fsr + if(is.null(dotdotdot$se)) { + dotdotdot$se <- "none" } - - if(!is.null(dotdotdot$do.fit)) { - dotdotdot$do.fit <- NULL + if(is.null(dotdotdot$test)) { + dotdotdot$test <- "satorra.bentler" + } + if(is.null(dotdotdot$missing)) { + dotdotdot$missing <- "ml" } - + if(is.null(dotdotdot$meanstructure)) { + dotdotdot$meanstructure <- TRUE + } + + + # STEP 0: process full model, without fitting + dotdotdot0 <- dotdotdot + dotdotdot0$do.fit <- NULL + dotdotdot0$se <- "none" # to avoid warning about missing="listwise" + dotdotdot0$test <- "none" # to avoid warning about missing="listwise" # check for arguments that we do not want (eg sample.cov)?
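# [Editorial sketch -- not part of the upstream diff.] A quick numerical
# check of the identity behind the new lav_utils_logsumexp() helper above:
# naively, exp(1000) overflows to Inf, but shifting by the maximum first
# keeps every intermediate value finite.
x <- c(1000, 1000)
log(sum(exp(x)))          # Inf: exp(1000) overflows double precision
a <- max(x)
a + log(sum(exp(x - a)))  # 1000.693..., i.e. 1000 + log(2), as expected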
# TODO - # process model, no fitting + # initial processing of the model, no fitting FIT <- do.call(cmd, args = c(list(model = model, data = data, - meanstructure = TRUE, - do.fit = FALSE), dotdotdot) ) - ngroups <- lavInspect(FIT, "ngroups") + #meanstructure = TRUE, + do.fit = FALSE), dotdotdot0) ) + lavoptions <- lavInspect(FIT, "options") + # restore + lavoptions$se <- dotdotdot$se + lavoptions$test <- dotdotdot$test + ngroups <- lavInspect(FIT, "ngroups") + lavpta <- FIT@pta + + # FIXME: not ready for multiple groups yet + if(ngroups > 1L) { + stop("lavaan ERROR: fsr code not ready for multiple groups (yet)") + } + + # if missing = "listwise", make data complete + if(lavoptions$missing == "listwise") { + # FIXME: make this work for multiple groups!! + OV <- unique(unlist(lavpta$vnames$ov)) + data <- na.omit(data[,OV]) + } + # any `regular' latent variables? lv.names <- unique(unlist(FIT@pta$vnames$lv.regular)) - nfac <- length(lv.names) if(length(lv.names) == 0L) { stop("lavaan ERROR: model does not contain any latent variables") } + nfac <- length(lv.names) # check parameter table PT <- parTable(FIT) @@ -76,29 +124,22 @@ # check if we can use skrondal & laake (no mediational terms?) if(fsr.method == "skrondal.laake") { if(any(eqs.x.names %in% eqs.y.names)) { - stop("lavaan ERROR: mediational relationship are not allowed for the Skrondal.Laake method; use ", sQuote("Croon"), " instead.") + stop("lavaan ERROR: mediational relationships are not allowed for the Skrondal.Laake method; use ", sQuote("Croon"), " instead.") } } - # STEP 1: + # STEP 1a: compute factor scores for each latent variable + # compute factor scores, per latent variable FS.SCORES <- vector("list", length = ngroups) - FSR.SCORES <- vector("list", length = ngroups) - FS.COV <- vector("list", length = ngroups) - FSR.COV <- vector("list", length = ngroups) LVINFO <- vector("list", length = ngroups) if(ngroups > 1L) { - names(FS.SCORES) <- names(LVINFO) <- names(FS.COV) <- - names(FSR.SCORES) <- names(FSR.COV) <- - lavInspect(FIT, "group.label") + names(FS.SCORES) <- names(LVINFO) <- lavInspect(FIT, "group.label") } - for(g in 1:ngroups) { FS.SCORES[[g]] <- vector("list", length = nfac) - FSR.SCORES[[g]] <- vector("list", length = nfac) - names(FS.SCORES[[g]]) <- names(FSR.SCORES[[g]]) <- lv.names - + names(FS.SCORES[[g]]) <- lv.names LVINFO[[g]] <- vector("list", length = nfac) names(LVINFO[[g]]) <- lv.names } @@ -109,58 +150,16 @@ dotdotdot2$test <- "none" dotdotdot2$debug <- FALSE dotdotdot2$verbose <- FALSE + dotdotdot2$auto.cov.lv.x <- TRUE # allow correlated exogenous factors - # we assume the same number/names of lv's per group!!! for(f in 1:nfac) { - FAC <- lv.names[f] - IND <- PT$rhs[ PT$op == "=~" & PT$lhs == FAC ] - - # check number of indicators - if(length(IND) < 3L) { - stop("lavaan ERROR: fsr currently needs at least 3 indicators per factor") - } - - # the latent variable definitions - op.idx <- which(PT$op == "=~" & PT$lhs == FAC) - - # the residuals + factor variance - var.idx <- which(PT$op == "~~" & PT$lhs %in% c(IND,FAC) & - PT$rhs %in% c(IND,FAC) & - PT$lhs == PT$rhs) - - # any residual covariances among the indicators - cov.idx <- which(PT$op == "~~" & PT$lhs %in% IND & - PT$rhs %in% IND & - PT$lhs != PT$rhs) - - # means/intercepts - # ov.int <- which(PT$op == "~1" & PT$lhs %in% IND) - # lv.int <- which(PT$op == "~1" & PT$lhs == FAC) - - - # any regression where lhs is an indicator - reg.idx <- which(PT$op == "~" & PT$lhs %in% IND & - !PT$rhs %in% lv.names) - - # and their variances... 
- reg.names <- PT$rhs[ reg.idx ] - var2.idx <- which(PT$op == "~~" & PT$lhs %in% reg.names & - PT$rhs %in% reg.names & - PT$lhs == PT$rhs) - - # eq constraints? - # TODO!! - keep.idx <- c(op.idx, var.idx, cov.idx, reg.idx, var2.idx) #, - #ov.int, lv.int) - PT.1fac <- PT[keep.idx, , drop = FALSE] - - # clean up - PT.1fac <- lav_partable_complete(PT.1fac) - + # create parameter table for this factor only + PT.1fac <- lav_partable_subset_measurement_model(PT = PT, + lavpta = lavpta, + lv.names = lv.names[f]) # fit 1-factor model - #fit.1fac <- lavaan(PT.1fac, data = data, ...) fit.1fac <- do.call("lavaan", args = c(list(model = PT.1fac, data = data), dotdotdot2) ) @@ -168,42 +167,37 @@ # fs.method? if(fsr.method == "skrondal.laake") { # dependent -> Bartlett - if(FAC %in% eqs.y.names) { + if(lv.names[f] %in% eqs.y.names) { fs.method <- "Bartlett" } else { fs.method <- "regression" } } - if(fsr.method %in% c("croon") || FIT@Options$se == "robust.sem") { - fsm <- TRUE + # compute factor scores + if(fsr.method %in% c("croon") || + lavoptions$se == "robust.sem") { + SC <- lav_predict_eta(fit.1fac, method = fs.method, fsm = TRUE) + FSM <- attr(SC, "fsm"); attr(SC, "fsm") <- NULL + LAMBDA <- computeLAMBDA(fit.1fac@Model) + THETA <- computeTHETA(fit.1fac@Model) } else { - fsm <- FALSE + SC <- lav_predict_eta(fit.1fac, method = fs.method, fsm = FALSE) } - # compute factor scores - SC <- lav_predict_eta(fit.1fac, method = fs.method, fsm = fsm) - + # store results for(g in 1:ngroups) { - - if(fsr.method %in% c("croon") || FIT@Options$se == "robust.sem") { - FSM <- attr(SC, "fsm") - attr(SC, "fsm") <- NULL - FS.SCORES[[g]][[f]] <- SC[[g]] - - lambda.idx <- which(names(fit.1fac@Model@GLIST) == "lambda") - theta.idx <- which(names(fit.1fac@Model@GLIST) == "theta") - LVINFO[[g]][[f]] <- - list(fsm = FSM[[g]], - lambda = fit.1fac@Model@GLIST[[lambda.idx[g]]], - theta = fit.1fac@Model@GLIST[[theta.idx[g]]]) - } else { - FS.SCORES[[g]][[f]] <- SC[[g]] + FS.SCORES[[g]][[f]] <- SC[[g]] + if(fsr.method %in% c("croon") || + lavoptions$se == "robust.sem") { + LVINFO[[g]][[f]] <- list(fsm = FSM[[g]], lambda = LAMBDA[[g]], + theta = THETA[[g]]) } } # g } # nfac + # cbind factor scores FS.SCORES <- lapply(1:ngroups, function(g) { SC <- as.data.frame(FS.SCORES[[g]]) @@ -212,214 +206,211 @@ # compute empirical covariance matrix factor scores FS.COV <- lapply(1:ngroups, function(g) { - COV <- cov(FS.SCORES[[g]]) ## divided by N-1!!! - if(FIT@Options$likelihood == "normal") { + COV <- cov(FS.SCORES[[g]]) ## divided by N-1!!! + if(lavoptions$likelihood == "normal") { Ng <- lavInspect(FIT, "nobs")[g] COV <- COV * (Ng - 1) / Ng } COV }) - FSR.COV <- FS.COV + if(lavoptions$meanstructure) { + FS.MEAN <- lapply(1:ngroups, function(g) { colMeans(FS.SCORES[[g]]) }) + } else { + FS.MEAN <- NULL + } + + # STEP 1b: if using `Croon' method: correct COV matrix: + if(fsr.method %in% c("croon")) { + FSR.COV <- lav_fsr_croon_correction(FS.COV = FS.COV, + LVINFO = LVINFO, + fs.method = fs.method) + } else { + FSR.COV <- FS.COV + } + # STEP 1c: do we need full set of factor scores? + if(fs.scores) { + # transform? 
+ if(fsr.method == "croon") { + for(g in 1:ngroups) { + OLD.inv <- solve(FS.COV[[g]]) + OLD.inv.sqrt <- lav_matrix_symmetric_sqrt(OLD.inv) + FSR.COV.sqrt <- lav_matrix_symmetric_sqrt(FSR.COV[[g]]) + SC <- as.matrix(FS.SCORES[[g]]) + SC <- SC %*% OLD.inv.sqrt %*% FSR.COV.sqrt + SC <- as.data.frame(SC) + names(SC) <- lv.names + FS.SCORES[[g]] <- SC + } + } - # STEP 2: - # construct path analysis model (removing all measurement elements) - PT.PA <- PT - PT.PA$est <- PT.PA$se <- NULL + # unlist if multiple groups, add group column + if(ngroups == 1L) { + FS.SCORES <- as.data.frame(FS.SCORES[[1]]) + } else { + stop("fix this!") + } + } - # extract all regressions - reg.idx <- which(PT$op == "~" & PT$lhs %in% eqs.names & - PT$rhs %in% eqs.names) - # the variances - var.idx <- which(PT$op == "~~" & PT$lhs %in% eqs.names & - PT$rhs %in% eqs.names & - PT$lhs == PT$rhs) - # optionally covariances (exo!) - cov.idx <- which(PT$op == "~~" & PT$lhs %in% eqs.names & - PT$rhs %in% eqs.names & - PT$lhs != PT$rhs) - # means/intercepts - int.idx <- which(PT$op == "~1" & PT$lhs %in% eqs.names) + # STEP 2: fit structural model using (corrected?) factor scores - keep.idx <- c(reg.idx, var.idx, cov.idx, int.idx) - PT.PA <- PT.PA[keep.idx, , drop = FALSE] + PT.PA <- lav_partable_subset_structural_model(PT, lavpta = lavpta) # free all means/intercepts int.idx <- which(PT.PA$op == "~1") PT.PA$free[int.idx] <- 1L PT.PA$ustart[int.idx] <- NA - # what about equality constraints? - # TODO - # what about inequality constraints? - if(any(PT.PA$op %in% c(">", "<"))) { - stop("lavaan ERROR: fsr does not support inequality constraints") + # adjust lavoptions + if(is.null(dotdotdot$do.fit)) { + lavoptions$do.fit <- TRUE + } else { + lavoptions$do.fit <- dotdotdot$do.fit } - - PT.PA <- lav_partable_complete(PT.PA) - - # adjust using Croon method - if(fsr.method %in% c("croon")) { - # compute 'corrected' COV for the factor scores - # using the Croon method + if(is.null(dotdotdot$se)) { + lavoptions$se <- "robust.sem" + } else { + lavoptions$se <- dotdotdot$se + } + if(is.null(dotdotdot$test)) { + lavoptions$test <- "satorra.bentler" + } else { + lavoptions$test <- dotdotdot$test + } + if(is.null(dotdotdot$sample.cov.rescale)) { + lavoptions$sample.cov.rescale <- FALSE + } else { + lavoptions$sample.cov.rescale <- dotdotdot$sample.cov.rescale + } + # take care of NACOV, in case we want correct standard errors + if(lavoptions$se == "robust.sem") { + Omega.f <- vector("list", length = ngroups) for(g in 1:ngroups) { - - # correct covariances only - if(fs.method != "bartlett") { - for(i in 1:(nfac-1)) { - LHS <- lv.names[i] - - A.y <- LVINFO[[g]][[LHS]]$fsm - lambda.y <- LVINFO[[g]][[LHS]]$lambda - - for(j in (i+1):nfac) { - RHS <- lv.names[j] - - A.x <- LVINFO[[g]][[RHS]]$fsm - lambda.x <- LVINFO[[g]][[RHS]]$lambda - - # always 1 if Bartlett - A.xy <- as.numeric(crossprod(A.x %*% lambda.x, - A.y %*% lambda.y)) - - # corrected covariance - FSR.COV[[g]][i,j] <- FSR.COV[[g]][j,i] <- - FS.COV[[g]][LHS,RHS] / A.xy + DATA <- FIT@Data@X[[g]] + if(Gamma.NT) { + if(lavoptions$missing == "listwise") { + Omega.y <- lav_samplestats_Gamma_NT(Y = DATA, + meanstructure = lavoptions$meanstructure, + rescale = TRUE, fixed.x = FALSE) + } else if(lavoptions$missing == "ml") { + # we assume UNSTRUCTURED Mu and Sigma!! 
+ MU <- FIT@SampleStats@missing.h1[[g]]$mu + SIGMA <- FIT@SampleStats@missing.h1[[g]]$sigma + if(lavoptions$information == "expected") { + Info <- lav_mvnorm_missing_information_expected( + Y = DATA, Mp = FIT@Data@Mp[[g]], + Mu = MU, Sigma = SIGMA) + } else { + Info <- lav_mvnorm_missing_information_observed_samplestats( + Yp = FIT@SampleStats@missing[[g]], + Mu = MU, Sigma = SIGMA) } + Omega.y <- lav_matrix_symmetric_inverse(Info) + } else { + stop("lavaan ERROR: can not handle missing = ", + lavoptions$missing) } - } - - # correct variances - for(i in 1:nfac) { - RHS <- lv.names[i] - - A.x <- LVINFO[[g]][[RHS]]$fsm - lambda.x <- LVINFO[[g]][[RHS]]$lambda - theta.x <- LVINFO[[g]][[RHS]]$theta - if(fs.method == "bartlett") { - A.xx <- 1.0 + } else { + if(lavoptions$missing == "listwise") { + Omega.y <- lav_samplestats_Gamma(Y = DATA, + meanstructure = lavoptions$meanstructure, + fixed.x = FALSE) + } else if(lavoptions$missing == "ml") { + # we assume UNSTRUCTURED Mu and Sigma!! + MU <- FIT@SampleStats@missing.h1[[g]]$mu + SIGMA <- FIT@SampleStats@missing.h1[[g]]$sigma + Omega.y <- lav_mvnorm_missing_h1_omega_sw(Y = + DATA, Mp = FIT@Data@Mp[[g]], + Yp = FIT@SampleStats@missing[[g]], + Mu = MU, Sigma = SIGMA, + information = lavoptions$information) } else { - A.xx <- as.numeric(crossprod(A.x %*% lambda.x)) + stop("lavaan ERROR: can not handle missing = ", + lavoptions$missing) } - - offset.x <- as.numeric(A.x %*% theta.x %*% t(A.x)) - - FSR.COV[[g]][i,i] <- (FS.COV[[g]][RHS, RHS] - offset.x)/A.xx } - } # g - - } # croon - - - # Step 3: sem using factor scores - if(fsr.method == "naive") { - # FIXME!!! rbind FS.SCORES in the multiple group case!!!! - if(ngroups > 1L) { - stop("lavaan ERROR: fsr code not ready for multiple groups (yet)") - } - FS.SCORES <- as.data.frame(FS.SCORES[[1]]) + # factor score matrices + A <- lav_matrix_bdiag(lapply(LVINFO[[g]], "[[", "fsm")) - if(FIT@Options$se == "robust.sem") { - # compute Omega.y (using NT for now) - DATA <- lavInspect(FIT, "data") - Omega.y <- lav_samplestats_Gamma_NT(Y = DATA, - meanstructure = TRUE, - rescale = TRUE, - fixed.x = FALSE) + # compensate for Croon correction + if(fs.method == "regression") { + if(!exists("OLD.inv.sqrt")) { + OLD.inv <- solve(FS.COV[[g]]) + OLD.inv.sqrt <- lav_matrix_symmetric_sqrt(OLD.inv) + } + if(!exists("FSR.COV.sqrt")) { + FSR.COV.sqrt <- lav_matrix_symmetric_sqrt(FSR.COV[[g]]) + } + A <- OLD.inv.sqrt %*% FSR.COV.sqrt %*% A + } - A <- lav_matrix_bdiag(lapply(LVINFO[[1]], "[[", "fsm")) - A11 <- A + # mean + vech(sigma) A22 <- lav_matrix_duplication_post( lav_matrix_duplication_ginv_pre(A %x% A)) - A.tilde <- lav_matrix_bdiag(A11, A22) - Omega.f <- A.tilde %*% Omega.y %*% t(A.tilde) - - # add factor scores to data.frame - fit <- lavaan(PT.PA, data = cbind(data, FS.SCORES), - meanstructure = TRUE, - NACOV = Omega.f, - se = "robust", - fixed.x = FALSE) - } else { - # add factor scores to data.frame - fit <- lavaan(PT.PA, data = cbind(data, FS.SCORES), ...) - } - - } else if(fsr.method == "skrondal.laake") { - - # FIXME!!! rbind FS.SCORES in the multiple group case!!!! - if(ngroups > 1L) { - stop("lavaan ERROR: fsr code not ready for multiple groups (yet)") - } - FS.SCORES <- as.data.frame(FS.SCORES[[1]]) - - # apply bias-avoiding method - fit <- lavaan(PT.PA, data = cbind(data, FS.SCORES), ...) 
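# [Editorial sketch -- not part of the upstream diff.] The inline Croon
# (2002) correction removed in this region is now delegated to
# lav_fsr_croon_correction(). Its essence, using the quantities stored in
# LVINFO (factor score matrix A, loadings lambda, residual covariance theta):
croon_cov <- function(cov.fs, A.x, lambda.x, A.y, lambda.y) {
    # attenuation term; equals 1 for Bartlett factor scores
    A.xy <- as.numeric(crossprod(A.x %*% lambda.x, A.y %*% lambda.y))
    cov.fs / A.xy                         # corrected covariance
}
croon_var <- function(var.fs, A.x, lambda.x, theta.x) {
    A.xx <- as.numeric(crossprod(A.x %*% lambda.x))
    offset.x <- as.numeric(A.x %*% theta.x %*% t(A.x))  # error-variance part
    (var.fs - offset.x) / A.xx            # corrected variance
}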
- - } else if(fsr.method == "croon") { + if(lavoptions$meanstructure) { + A11 <- A + A.tilde <- lav_matrix_bdiag(A11, A22) + } else { + A.tilde <- A22 + } + Omega.f[[g]] <- A.tilde %*% Omega.y %*% t(A.tilde) + } # g + } else { + Omega.f <- NULL + } - # apply bias-correcting method - # transform FS.SCORES - for(g in 1:ngroups) { - OLD.inv <- solve(FS.COV[[g]]) - OLD.inv.sqrt <- lav_matrix_symmetric_sqrt(OLD.inv) - FSR.COV.sqrt <- lav_matrix_symmetric_sqrt(FSR.COV[[g]]) - SC <- as.matrix(FS.SCORES[[g]]) - SC <- SC %*% OLD.inv.sqrt %*% FSR.COV.sqrt - SC <- as.data.frame(SC) - names(SC) <- lv.names - FSR.SCORES[[g]] <- SC - } + # fit structural model + lavoptions2 <- lavoptions + #lavoptions2$se <- "none" + #lavoptions2$test <- "none" + lavoptions2$missing <- "listwise" # always complete data anyway... + fit <- lavaan(PT.PA, + sample.cov = FSR.COV, + sample.mean = FS.MEAN, + sample.nobs = FIT@SampleStats@nobs, + NACOV = Omega.f, + slotOptions = lavoptions2) + + # extra info + extra <- list( FS.COV = FS.COV, FS.SCORES = FS.SCORES, + FSR.COV = FSR.COV, + LVINFO = LVINFO) + + PE <- parameterEstimates(fit, add.attributes = TRUE) + + # standard errors + #lavsamplestats <- fit@SampleStats + #lavsamplestats@NACOV <- Omega.f + #VCOV <- lav_model_vcov(fit@Model, lavsamplestats = lavsamplestats, + # lavoptions = lavoptions) + #SE <- lav_model_vcov_se(fit@Model, fit@ParTable, VCOV = VCOV) + #PE$se <- SE + #tmp.se <- ifelse(PE$se == 0.0, NA, PE$se) + #zstat <- pvalue <- TRUE + #if(zstat) { + # PE$z <- PE$est / tmp.se + # if(pvalue) { + # PE$pvalue <- 2 * (1 - pnorm( abs(PE$z) )) + # } + #} + + out <- list(header = "This is fsr (0.1) -- factor score regression.", + PE = PE) - # FIXME!!! rbind FS.SCORES in the multiple group case!!!! - if(ngroups > 1L) { - stop("lavaan ERROR: fsr code not ready for multiple groups (yet)") - } - FSR.SCORES <- as.data.frame(FSR.SCORES[[1]]) + if(lvinfo) { + out$lvinfo <- extra + } - # compute Omega.y (using NT for now) - DATA <- lavInspect(FIT, "data") - Omega.y <- lav_samplestats_Gamma_NT(Y = DATA, - meanstructure = TRUE, - rescale = TRUE, - fixed.x = FALSE) - - # factor score matrices - A <- lav_matrix_bdiag(lapply(LVINFO[[1]], "[[", "fsm")) - # compensate for Croon correction - if(fs.method == "regression") { - A <- OLD.inv.sqrt %*% FSR.COV.sqrt %*% A - } + class(out) <- c("lavaan.fsr", "list") - # mean + vech(sigma) - A11 <- A - A22 <- lav_matrix_duplication_post( - lav_matrix_duplication_ginv_pre(A %x% A)) - A.tilde <- lav_matrix_bdiag(A11, A22) - Omega.f <- A.tilde %*% Omega.y %*% t(A.tilde) - - # add factor scores to data.frame - fit <- lavaan(PT.PA, data = cbind(data, FSR.SCORES), - meanstructure = TRUE, - NACOV = Omega.f, - se = "robust", - fixed.x = FALSE) + out +} - } else { - stop("lavaan ERROR: fsr.method [", fsr.method, "] unknown", sep="") - } - # use 'external' slot to stores info - fit@external <- list( FS.COV = FS.COV, FS.SCORES = FS.SCORES, - FSR.COV = FSR.COV, FSR.SCORES = FSR.SCORES, - LVINFO = LVINFO) - - fit -} diff -Nru r-cran-lavaan-0.5.22/R/xxx_lavaanList.R r-cran-lavaan-0.5.23.1097/R/xxx_lavaanList.R --- r-cran-lavaan-0.5.22/R/xxx_lavaanList.R 2016-07-19 20:38:53.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/xxx_lavaanList.R 2017-01-27 19:49:24.000000000 +0000 @@ -1,5 +1,7 @@ # lavaanList: fit the *same* model, on different datasets -# YR - 29 June 2016 +# YR - 29 Jun 2016 + +# YR - 27 Jan 2017: change lavoptions; add dotdotdot to each call lavaanList <- function(model = NULL, # model dataList = NULL, # list of datasets @@ -78,27 +80,20 @@ FIT <- 
model } else { # adapt for FIT - dotdotdotFIT <- dotdotdot - dotdotdotFIT$do.fit <- TRUE # to get starting values - dotdotdotFIT$se <- "none" - dotdotdotFIT$test <- "none" - dotdotdotFIT$verbose <- FALSE - dotdotdotFIT$debug <- FALSE + #dotdotdotFIT <- dotdotdot + #dotdotdotFIT$do.fit <- TRUE # to get starting values + #dotdotdotFIT$se <- "none" + #dotdotdotFIT$test <- "none" + #dotdotdotFIT$verbose <- FALSE + #dotdotdotFIT$debug <- FALSE # initial model fit, using first dataset FIT <- do.call(cmd, args = c(list(model = model, - data = firstData), dotdotdotFIT) ) + data = firstData), dotdotdot) ) } - # use original dotdotdot to set user-specified options - opt.list <- formals(lavaan) - opt.list$categorical <- FIT@Options$categorical - opt.list$meanstructure <- FIT@Options$meanstructure - opt.list$conditional.x <- FIT@Options$conditional.x - lavoptions <- lav_options_set( modifyList(opt.list, - val = dotdotdot) ) - + lavoptions <- FIT@Options lavmodel <- FIT@Model lavpartable <- FIT@ParTable lavpta <- FIT@pta @@ -174,11 +169,12 @@ # fit model with this (new) dataset if(cmd %in% c("lavaan", "sem", "cfa", "growth")) { lavobject <- try(do.call("lavaan", - args = list(slotOptions = lavoptions, - slotParTable = lavpartable, - slotModel = lavmodel, - start = FIT, - data = DATA)), + args = c(list(slotOptions = lavoptions, + slotParTable = lavpartable, + slotModel = lavmodel, + start = FIT, + data = DATA), + dotdotdot)), silent = TRUE) } else if(cmd == "fsr") { # extract fs.method and fsr.method from dotdotdot @@ -195,23 +191,19 @@ } lavobject <- try(do.call("fsr", - args = list(slotOptions = lavoptions, - slotParTable = lavpartable, - slotModel = lavmodel, - start = FIT, - data = DATA, - cmd = "lavaan", - fs.method = fs.method, - fsr.method = fsr.method)), + args = c(list(slotOptions = lavoptions, + slotParTable = lavpartable, + slotModel = lavmodel, + start = FIT, + data = DATA, + cmd = "lavaan", + fs.method = fs.method, + fsr.method = fsr.method), + dotdotdot)), silent = TRUE) } else { stop("lavaan ERROR: unknown cmd: ", cmd) } - #lavobject <- try(lavaan(slotOptions = lavoptions, - # slotParTable = lavpartable, - # slotModel = lavmodel, - # start = FIT, - # data = DATA), silent = TRUE) RES <- list(ok = FALSE, timing = NULL, ParTable = NULL, Data = NULL, SampleStats = NULL, vcov = NULL, @@ -262,9 +254,6 @@ RES$fun <- FUN(lavobject) } - #if("coef" %in% output) { - # COEF[[i]] <- coef(lavobject) - #} } else { if(show.progress) { cat(" FAILED: no convergence\n") diff -Nru r-cran-lavaan-0.5.22/R/xxx_lavaan.R r-cran-lavaan-0.5.23.1097/R/xxx_lavaan.R --- r-cran-lavaan-0.5.22/R/xxx_lavaan.R 2016-09-21 11:37:52.000000000 +0000 +++ r-cran-lavaan-0.5.23.1097/R/xxx_lavaan.R 2017-02-24 11:01:59.000000000 +0000 @@ -6,77 +6,34 @@ # - merge cfa/sem/growth functions # YR 25/02/2012: changed data slot (from list() to S4); data@X contains data +# YR 26 Jan 2017: use '...' to capture the never-ending list of options + lavaan <- function(# user-specified model: can be syntax, parameter Table, ... model = NULL, - data = NULL, # second argument, most used! - model.type = "sem", - - # model modifiers - meanstructure = "default", - int.ov.free = FALSE, - int.lv.free = FALSE, - conditional.x = "default", # or FALSE? - fixed.x = "default", # or FALSE? 
- orthogonal = FALSE, - std.lv = FALSE, - parameterization = "default", - - auto.fix.first = FALSE, - auto.fix.single = FALSE, - auto.var = FALSE, - auto.cov.lv.x = FALSE, - auto.cov.y = FALSE, - auto.th = FALSE, - auto.delta = FALSE, - - # full data - std.ov = FALSE, - missing = "default", + # data (second argument, most used) + data = NULL, + + # variable information ordered = NULL, # summary data sample.cov = NULL, - sample.cov.rescale = "default", sample.mean = NULL, sample.nobs = NULL, - ridge = 1e-5, - # multiple groups + # multiple groups? group = NULL, - group.label = NULL, - group.equal = '', - group.partial = '', - group.w.free = FALSE, - # clusters + # multiple levels? cluster = NULL, - + # constraints constraints = '', - # estimation - estimator = "default", - likelihood = "default", - link = "default", - information = "default", - se = "default", - test = "default", - bootstrap = 1000L, - mimic = "default", - representation = "default", - do.fit = TRUE, - control = list(), + # user-specified variance matrices WLS.V = NULL, NACOV = NULL, - # zero values - zero.add = "default", - zero.keep.margins = "default", - zero.cell.warn = TRUE, - - # starting values - start = "default", - # full slots from previous fits slotOptions = NULL, slotParTable = NULL, @@ -85,26 +42,51 @@ slotModel = NULL, slotCache = NULL, - # sanity checks - check = c("start", "post"), - - # verbosity - verbose = FALSE, - warn = TRUE, - debug = FALSE - ) -{ + # options (dotdotdot) + ... + ) { # start timer start.time0 <- start.time <- proc.time()[3]; timing <- list() # 0a. store call - mc <- match.call() + mc <- match.call(expand.dots = TRUE) + + # handle dotdotdot + dotdotdot <- list(...) + # backwards compatibility, control= argument (<0.5-23) + if(!is.null(dotdotdot$control)) { + # optim.method + if(!is.null(dotdotdot$control$optim.method)) { + dotdotdot$optim.method <- dotdotdot$control$optim.method + } + # cor.optim.method + if(!is.null(dotdotdot$control$cor.optim.method)) { + dotdotdot$optim.method.cor <- dotdotdot$control$cor.optim.method + } + # control$optim.force.converged + if(!is.null(dotdotdot$control$optim.force.converged)) { + dotdotdot$optim.force.converged <- + dotdotdot$control$optim.force.converged + } + # gradient + if(!is.null(dotdotdot$control$gradient)) { + dotdotdot$optim.gradient <- dotdotdot$control$gradient + } + if(!is.null(dotdotdot$gradient)) { + dotdotdot$optim.gradient <- dotdotdot$gradient + } + # init_nelder_mead + if(!is.null(dotdotdot$control$init_nelder_mead)) { + dotdotdot$optim.init_nelder_mead <- + dotdotdot$control$init_nelder_mead + } + } - ################################### - #### 1. ov.names + categorical #### - ################################### + ###################### + #### 1. ov.names #### + ###################### # 1a. get ov.names and ov.names.x (per group) -- needed for lavData() if(!is.null(slotParTable)) { FLAT <- slotParTable @@ -118,7 +100,7 @@ # two possibilities: either model is already lavaanified # or it is something else... - # look for the bare minimum columns: lhs - op - + # look for the bare minimum columns: lhs - op - rhs if(!is.null(model$lhs) && !is.null(model$op) && !is.null(model$rhs) && !is.null(model$free)) { @@ -130,6 +112,21 @@ # "auto.fix.first", "auto.fix.single", "auto.var", # "auto.cov.lv.x", "auto.cov.y", "auto.th", "auto.delta") FLAT <- model + + # fix semTools issue here?
for auxiliary() which does not use + # block column yet + if(!is.null(FLAT$block)) { + N <- length(FLAT$lhs) + if(length(FLAT$block) != N) { + FLAT$block <- FLAT$group + } + if(any(is.na(FLAT$block))) { + FLAT$block <- FLAT$group + } + } else if(!is.null(FLAT$group)) { + FLAT$block <- FLAT$group + } + } else { bare.minimum <- c("lhs", "op", "rhs", "free") missing.idx <- is.na(match(bare.minimum, names(model))) @@ -140,56 +137,35 @@ } } - # blocks? - if(sum(FLAT$op == ":") == 0L) { - # no blocks: same set of variables per group/block - ov.names <- vnames(FLAT, type="ov") - ov.names.y <- vnames(FLAT, type="ov.nox") - ov.names.x <- vnames(FLAT, type="ov.x") - } else { - # possibly different set of variables per group/block - # FIXME: for now (0.5), we only 'recognize' groups - - # how many groups? - n.block.groups <- length( unique(FLAT$rhs[FLAT$op == ":" & - FLAT$lhs == "group"]) ) - ov.names <- lapply(1:n.block.groups, - function(x) vnames(FLAT, type="ov", group=x)) - ov.names.y <- lapply(1:n.block.groups, - function(x) vnames(FLAT, type="ov.nox", group=x)) - ov.names.x <- lapply(1:n.block.groups, - function(x) vnames(FLAT, type="ov.x", group=x)) - } - - # 1b categorical variables? -- needed for lavoptions - if(any(FLAT$op == "|")) { - categorical <- TRUE - # just in case, add lhs variables names to "ordered" - ordered <- unique(c(ordered, lavNames(FLAT, "ov.ord"))) - } else if(!is.null(data) && length(ordered) > 0L) { - categorical <- TRUE - } else if(is.data.frame(data) && - lav_dataframe_check_ordered(frame=data, ov.names=ov.names.y)) { - categorical <- TRUE - } else { - categorical <- FALSE - } - - # 1c meanstructure? -- needed for lavoptions - if(any(FLAT$op == "~1")) { - meanstructure <- TRUE - } - - # 1d exogenous covariates? if not, set conditional.x to FALSE - if( (is.list(ov.names.x) && sum(sapply(ov.names.x, FUN = length)) == 0L) || - (is.character(ov.names.x) && length(ov.names.x) == 0L) ) { - # if explicitly set to TRUE, give warning - if(is.logical(conditional.x) && conditional.x) { - warning("lavaan WARNING: no exogenous covariates; conditional.x will be set to FALSE") + # group blocks? + if(any(FLAT$op == ":" & FLAT$lhs == "group")) { + # here, we only need to figure out: + # - ngroups + # - ov's per group + # - FIXME: we need a more efficient way, avoiding lavaanify/vnames + group.idx <- which(FLAT$op == ":" & FLAT$lhs == "group") + tmp.group.values <- unique(FLAT$rhs[group.idx]) + tmp.ngroups <- length(tmp.group.values) + tmp.lav <- lavaanify(FLAT, ngroups = tmp.ngroups) + ov.names <- ov.names.y <- ov.names.x <- vector("list", + length = tmp.ngroups) + for(g in seq_len(tmp.ngroups)) { + ov.names[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, + type = "ov", group = tmp.group.values[g]))) + ov.names.y[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, + type = "ov.nox", group = tmp.group.values[g]))) + ov.names.x[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, + type = "ov.x", group = tmp.group.values[g]))) } - conditional.x <- FALSE + } else { + # no blocks: same set of variables per group/block + ov.names <- lav_partable_vnames(FLAT, type = "ov") + ov.names.y <- lav_partable_vnames(FLAT, type = "ov.nox") + ov.names.x <- lav_partable_vnames(FLAT, type = "ov.x") } + # sanity check ordered argument (just in case, add lhs variable names) + ordered <- unique(c(ordered, lavNames(FLAT, "ov.ord"))) @@ -199,40 +175,71 @@ ####################### #### 2.
lavoptions #### ####################### - #opt <- modifyList(formals(lavaan), as.list(mc)[-1]) - # force evaluation of `language` and/or `symbol` arguments - #opt <- lapply(opt, function(x) if(typeof(x) %in% c("language", "symbol")) - # eval(x, parent.frame()) else x) if(!is.null(slotOptions)) { lavoptions <- slotOptions } else { - opt <- list(model = model, model.type = model.type, - meanstructure = meanstructure, - int.ov.free = int.ov.free, int.lv.free = int.lv.free, - conditional.x = conditional.x, fixed.x = fixed.x, - orthogonal = orthogonal, std.lv = std.lv, - parameterization = parameterization, - auto.fix.first = auto.fix.first, auto.fix.single = auto.fix.single, - auto.var = auto.var, auto.cov.lv.x = auto.cov.lv.x, - auto.cov.y = auto.cov.y, auto.th = auto.th, - auto.delta = auto.delta, missing = missing, - group = group, categorical = categorical, - group.equal = group.equal, group.partial = group.partial, - group.w.free = group.w.free, - constraints = constraints, - estimator = estimator, likelihood = likelihood, link = link, - sample.cov.rescale = sample.cov.rescale, - information = information, se = se, test = test, - bootstrap = bootstrap, mimic = mimic, - zero.add = zero.add, zero.keep.margins = zero.keep.margins, - zero.cell.warn = zero.cell.warn, - representation = representation, do.fit = do.fit, verbose = verbose, - warn = warn, debug = debug) + + # load default options + opt <- lav_options_default() + + # modifyList + opt <- modifyList(opt, dotdotdot) + + # categorical mode? + if(any(FLAT$op == "|")) { + opt$categorical <- TRUE + } else if(!is.null(data) && length(ordered) > 0L) { + opt$categorical <- TRUE + } else if(is.data.frame(data) && + lav_dataframe_check_ordered(frame = data, ov.names = ov.names.y)) { + opt$categorical <- TRUE + } else { + opt$categorical <- FALSE + } + + # constraints + if(nchar(constraints) > 0L) { + opt$information <- "observed" + } + + # meanstructure + if(any(FLAT$op == "~1")) { + opt$meanstructure <- TRUE + } + if(!is.null(group) && is.null(dotdotdot$meanstructure)) { + opt$meanstructure <- TRUE + } + + # conditional.x + if( (is.list(ov.names.x) && + sum(sapply(ov.names.x, FUN = length)) == 0L) || + (is.character(ov.names.x) && length(ov.names.x) == 0L) ) { + # if explicitly set to TRUE, give warning + if(is.logical(dotdotdot$conditional.x) && dotdotdot$conditional.x) { + warning("lavaan WARNING: no exogenous covariates; conditional.x will be set to FALSE") + } + opt$conditional.x <- FALSE + } + + # fixed.x + if( (is.list(ov.names.x) && + sum(sapply(ov.names.x, FUN = length)) == 0L) || + (is.character(ov.names.x) && length(ov.names.x) == 0L) ) { + # if explicitly set to TRUE, give warning + if(is.logical(dotdotdot$fixed.x) && dotdotdot$fixed.x) { + # ok, we respect this: keep fixed.x = TRUE + } else { + opt$fixed.x <- FALSE + } + } + + # fill in remaining "default" values lavoptions <- lav_options_set(opt) } timing$InitOptions <- (proc.time()[3] - start.time) start.time <- proc.time()[3] + # some additional checks for estimator="PML" if(lavoptions$estimator == "PML") { ovy <- unique( unlist(ov.names.y) ) @@ -282,40 +289,50 @@ } lavdata <- lavData(data = data, group = group, - group.label = group.label, + cluster = cluster, ov.names = ov.names, - ordered = ordered, ov.names.x = ov.names.x, - std.ov = std.ov, - missing = lavoptions$missing, + ordered = ordered, sample.cov = sample.cov, sample.mean = sample.mean, sample.nobs = sample.nobs, - warn = lavoptions$warn) + lavoptions = lavoptions) } # what have we learned from the data? 
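# [Editorial illustration -- hypothetical call, not part of the upstream
# diff.] The defaulting logic above means that, for a model without
# exogenous covariates, an explicit conditional.x = TRUE is reset with a
# warning, while an explicit fixed.x = TRUE is respected:
library(lavaan)
fit <- sem('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939,
           conditional.x = TRUE)
# lavaan WARNING: no exogenous covariates; conditional.x will be set to FALSE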
if(lavdata@data.type == "none") { - do.fit <- FALSE; start <- "simple" - lavoptions$se <- "none"; lavoptions$test <- "none" + lavoptions$do.fit <- FALSE + lavoptions$start <- "simple" + lavoptions$se <- "none" + lavoptions$test <- "none" } else if(lavdata@data.type == "moment") { # catch here some options that will not work with moments if(lavoptions$se == "bootstrap") { stop("lavaan ERROR: bootstrapping requires full data") } - if(estimator %in% c("MLM", "MLMV", "MLMVS", "MLR", - "ULSM", "ULSMV", "ULSMVS") && is.null(NACOV)) { - stop("lavaan ERROR: estimator ", estimator, " requires full data or user-provided NACOV") + if(lavoptions$estimator %in% c("MLM", "MLMV", "MLMVS", "MLR", "ULSM", + "ULSMV", "ULSMVS") && is.null(NACOV)) { + stop("lavaan ERROR: estimator ", lavoptions$estimator, + " requires full data or user-provided NACOV") } - if(estimator %in% c("WLS", "WLSM", "WLSMV", "WLSMVS", "DWLS") && + if(lavoptions$estimator %in% + c("WLS", "WLSM", "WLSMV", "WLSMVS", "DWLS") && is.null(WLS.V)) { - stop("lavaan ERROR: estimator ", estimator, " requires full data or user-provided WLS.V") + stop("lavaan ERROR: estimator ", lavoptions$estimator, + " requires full data or user-provided WLS.V") } } timing$InitData <- (proc.time()[3] - start.time) start.time <- proc.time()[3] - if(debug) print(str(lavdata)) - + if(lavoptions$debug) { + print(str(lavdata)) + } + # if lavdata@nlevels > 1L, adapt start option (for now) + # until we figure out how to handle groups+blocks + if(lavdata@nlevels > 1L) { + lavoptions$start <- "simple" + } + @@ -330,7 +347,9 @@ lavpartable <- slotParTable } else if(is.character(model)) { # check FLAT before we proceed - if(debug) print(as.data.frame(FLAT)) + if(lavoptions$debug) { + print(as.data.frame(FLAT)) + } # catch ~~ of fixed.x covariates if fixed.x = TRUE if(lavoptions$fixed.x) { tmp <- try(vnames(FLAT, type = "ov.x", ov.x.fatal = TRUE), @@ -346,6 +365,10 @@ lavpartable <- lavaanify(model = FLAT, + constraints = constraints, + varTable = lavdata@ov, + ngroups = lavdata@ngroups, + meanstructure = lavoptions$meanstructure, int.ov.free = lavoptions$int.ov.free, int.lv.free = lavoptions$int.lv.free, @@ -354,8 +377,6 @@ fixed.x = lavoptions$fixed.x, std.lv = lavoptions$std.lv, parameterization = lavoptions$parameterization, - constraints = constraints, - auto.fix.first = lavoptions$auto.fix.first, auto.fix.single = lavoptions$auto.fix.single, auto.var = lavoptions$auto.var, @@ -363,9 +384,6 @@ auto.cov.y = lavoptions$auto.cov.y, auto.th = lavoptions$auto.th, auto.delta = lavoptions$auto.delta, - - varTable = lavdata@ov, - ngroups = lavdata@ngroups, group.equal = lavoptions$group.equal, group.partial = lavoptions$group.partial, group.w.free = lavoptions$group.w.free, @@ -379,19 +397,22 @@ } else if(is.list(model)) { # we already checked this when creating FLAT # but we may need to complete it - lavpartable <- as.list(model) # in case model is a data.frame + lavpartable <- as.list(FLAT) # in case model is a data.frame # complete table lavpartable <- lav_partable_complete(lavpartable) } else { - stop("lavaan ERROR: model [type = ", class(model), + stop("lavaan ERROR: model [type = ", class(model), "] is not of type character or list") } - if(debug) print(as.data.frame(lavpartable)) + if(lavoptions$debug) { + print(as.data.frame(lavpartable)) + } # at this point, we should check if the partable is complete # or not; this is especially relevant if the lavaan() function # was used, but the user has forgotten some variances/intercepts... 
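# [Editorial illustration -- hypothetical call, not part of the upstream
# diff.] The moment-data checks above in action: robust estimators need
# case-level data (or a user-supplied NACOV), so fitting from a covariance
# matrix alone stops early with the error constructed above.
library(lavaan)
S <- cov(HolzingerSwineford1939[, c("x1", "x2", "x3")])
try(sem('visual =~ x1 + x2 + x3', sample.cov = S, sample.nobs = 301L,
        estimator = "MLM"))
# lavaan ERROR: estimator MLM requires full data or user-provided NACOV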
- junk <- lav_partable_check(lavpartable, categorical = categorical, + junk <- lav_partable_check(lavpartable, + categorical = lavoptions$categorical, warn = TRUE) # 4b. get partable attributes @@ -426,10 +447,10 @@ missing.h1 = (lavoptions$missing != "listwise"), WLS.V = WLS.V, NACOV = NACOV, - ridge = ridge, - optim.method = - ifelse(!is.null(control$cor.optim.method), - control$cor.optim.method, "nlminb"), + se = lavoptions$se, + information = lavoptions$information, + ridge = lavoptions$ridge, + optim.method = lavoptions$optim.method.cor, zero.add = lavoptions$zero.add, zero.keep.margins = lavoptions$zero.keep.margins, zero.cell.warn = lavoptions$zero.cell.warn, @@ -448,23 +469,21 @@ group.w.free = lavoptions$group.w.free, WLS.V = WLS.V, NACOV = NACOV, - ridge = ridge, + ridge = lavoptions$ridge, rescale = lavoptions$sample.cov.rescale) } else { # no data - th.idx <- vector("list", length=lavdata@ngroups) - for(g in 1:lavdata@ngroups) { - th.idx[[g]] <- lav_partable_ov_idx(lavpartable, type="th") - } lavsamplestats <- new("lavSampleStats", ngroups=lavdata@ngroups, nobs=as.list(rep(0L, lavdata@ngroups)), cov.x=vector("list",length=lavdata@ngroups), - th.idx=th.idx, + th.idx=lavpta$vidx$th.mean, missing.flag=FALSE) } timing$Sample <- (proc.time()[3] - start.time) start.time <- proc.time()[3] - if(debug) print(str(lavsamplestats)) + if(lavoptions$debug) { + print(str(lavsamplestats)) + } @@ -488,7 +507,7 @@ start.time <- proc.time()[3] } else { # check if we have provide a full parameter table as model= input - if(!is.null(lavpartable$est) && start == "default") { + if(!is.null(lavpartable$est) && lavoptions$start == "default") { # check if all 'est' values look ok # this is not the case, eg, if partables have been merged eg, as # in semTools' auxiliary() function @@ -500,7 +519,7 @@ lavpartable$est == 0) if(length(zero.idx) > 0L || any(is.na(lavpartable$est))) { - lavpartable$start <- lav_start(start.method = start, + lavpartable$start <- lav_start(start.method = lavoptions$start, lavpartable = lavpartable, lavsamplestats = lavsamplestats, model.type = lavoptions$model.type, @@ -510,7 +529,7 @@ lavpartable$start <- lavpartable$est } } else { - START <- lav_start(start.method = start, + START <- lav_start(start.method = lavoptions$start, lavpartable = lavpartable, lavsamplestats = lavsamplestats, model.type = lavoptions$model.type, @@ -518,7 +537,7 @@ debug = lavoptions$debug) # sanity check - if("start" %in% check) { + if("start" %in% lavoptions$check) { START <- lav_start_check_cov(lavpartable = lavpartable, start = START) } @@ -532,30 +551,19 @@ - - - ##################### #### 7. 
lavmodel #### ##################### - lavmodel <- - lav_model(lavpartable = lavpartable, - representation = lavoptions$representation, - conditional.x = lavoptions$conditional.x, - th.idx = lavsamplestats@th.idx, - parameterization = lavoptions$parameterization, - link = lavoptions$link, - control = control, - debug = lavoptions$debug) + lavmodel <- lav_model(lavpartable = lavpartable, + lavoptions = lavoptions, + th.idx = lavsamplestats@th.idx) timing$Model <- (proc.time()[3] - start.time) start.time <- proc.time()[3] # if no data, call lav_model_set_parameters once (for categorical case) if(lavdata@data.type == "none" && lavmodel@categorical) { - lavmodel <- - lav_model_set_parameters(lavmodel = lavmodel, - x = lav_model_get_parameters(lavmodel), - estimator =lavoptions$estimator) + lavmodel <- lav_model_set_parameters(lavmodel = lavmodel, + x = lav_model_get_parameters(lavmodel)) } } # slotModel @@ -581,7 +589,8 @@ BI <- lav_tables_pairwise_freq_cell(lavdata) # handle option missing = "available.cases" - if(lavoptions$missing == "available.cases") { + if(lavoptions$missing == "available.cases" || + lavoptions$missing == "doubly.robust") { UNI <- lav_tables_univariate_freq_cell(lavdata) } @@ -589,15 +598,12 @@ if (lavoptions$missing == "doubly.robust") { # check whether the probabilities pairwiseProbGivObs and # univariateProbGivObs are given by the user - if(is.null(control$pairwiseProbGivObs)) { + if(is.null(lavoptions$control$pairwiseProbGivObs)) { stop("lavaan ERROR: could not find `pairwiseProbGivObs' in control() list") } - if(is.null(control$univariateProbGivObs)) { + if(is.null(lavoptions$control$univariateProbGivObs)) { stop("lavaan ERROR: could not find `univariateProbGivObs' in control() list") } - if(is.null(control$FitFunctionConst)) { - stop("lavaan ERROR: could not find `FitFunctionConst' in control() list") - } } for(g in 1:lavdata@ngroups) { @@ -618,7 +624,8 @@ LONG = LONG) # available cases - if(lavoptions$missing == "available.cases") { + if(lavoptions$missing == "available.cases" || + lavoptions$missing == "doubly.robust") { if(is.null(UNI$group) || max(UNI$group) == 1L) { unifreq <- UNI$obs.freq uninobs <- UNI$nobs @@ -637,24 +644,27 @@ # order as unifreq; i.e. 
w_ia, i=1,...,p, (p variables), # a=1,...,Ci, (Ci response categories for variable i), # a running faster than i - lavcache[[g]]$uniweights <- c( apply(lavdata@X[[g]], 2, + tmp.uniweights <- apply(lavdata@X[[g]], 2, function(x){ tapply(uniweights.casewise, as.factor(x), sum, - na.rm=TRUE) } )) - } # "available.cases" + na.rm=TRUE) } ) + if( is.matrix(tmp.uniweights) ) { + lavcache[[g]]$uniweights <- c(tmp.uniweights) + } + if( is.list(tmp.uniweights) ) { + lavcache[[g]]$uniweights <- unlist(tmp.uniweights) + } + } # "available.cases" or "doubly.robust" - # doubly.robust" + # doubly.robust only if (lavoptions$missing == "doubly.robust") { - # add fit constant - lavcache[[g]]$FitFunctionConst <- control$FitFunctionConst - # add the user-provided probabilities # pairwiseProbGivObs and univariateProbGivObs in Cache lavcache[[g]]$pairwiseProbGivObs <- - control$pairwiseProbGivObs[[g]] + lavoptions$control$pairwiseProbGivObs[[g]] lavcache[[g]]$univariateProbGivObs <- - control$univariateProbGivObs[[g]] + lavoptions$control$univariateProbGivObs[[g]] # compute different index vectors that will help to do # calculations ind.vec <- as.data.frame(LONG[1:5] ) @@ -676,23 +686,27 @@ # elements of vector probY1Gy2 refer to nlev <- lavdata@ov$nlev nvar <- length(nlev) - idx.Y1 <- c() - idx.Gy2 <- c() - idx.cat.Y1 <- c() - idx.cat.Gy2 <- c() - for(i in 1:nvar) { - for(j in 1:nvar) { - if(i != j) { - idx.cat.Y1 <- c(idx.cat.Y1, - rep(1:nlev[i], times=nlev[j])) - idx.cat.Gy2 <- c(idx.cat.Gy2, - rep(1:nlev[j], each=nlev[i])) - tmp.lngth <- nlev[i]*nlev[j] - idx.Y1 <- c(idx.Y1, rep(i, tmp.lngth) ) - idx.Gy2 <- c(idx.Gy2, rep(j, tmp.lngth) ) - } - } - } + + idx.var.matrix <- matrix(1:nvar, nrow=nvar, ncol=nvar) + idx.diag <- diag( matrix(1:(nvar*nvar), nrow=nvar, + ncol=nvar) ) + idx.Y1Gy2.matrix <- rbind(t(idx.var.matrix)[-idx.diag], + idx.var.matrix [-idx.diag]) + no.pairs.Y1Gy2 <- ncol(idx.Y1Gy2.matrix) + idx.cat.Y1 <- unlist(lapply(1:no.pairs.Y1Gy2, function(x) { + rep( 1:nlev[ idx.Y1Gy2.matrix[1,x] ], + times= nlev[ idx.Y1Gy2.matrix[2,x] ] )} ) ) + idx.cat.Gy2 <- unlist(lapply(1:no.pairs.Y1Gy2, function(x) { + rep( 1:nlev[ idx.Y1Gy2.matrix[2,x] ], + each= nlev[ idx.Y1Gy2.matrix[1,x] ] )} ) ) + dim.pairs <- unlist(lapply(1:no.pairs.Y1Gy2, function(x) { + nlev[ idx.Y1Gy2.matrix[1,x] ] * + nlev[ idx.Y1Gy2.matrix[2,x] ] }) ) + idx.Y1 <- unlist( mapply(rep, idx.Y1Gy2.matrix[1,], + each=dim.pairs) ) + idx.Gy2 <- unlist( mapply(rep, idx.Y1Gy2.matrix[2,], + each=dim.pairs) ) + lavcache[[g]]$idx.Y1 <- idx.Y1 lavcache[[g]]$idx.Gy2 <- idx.Gy2 lavcache[[g]]$idx.cat.Y1 <- idx.cat.Y1 @@ -720,19 +734,15 @@ # If estimator = MML, store Gauss-Hermite nodes/weights if(lavoptions$estimator == "MML") { - if(!is.null(control$nGH)) { - nGH <- control$nGH - } else { - nGH <- 21L - } for(g in 1:lavdata@ngroups) { # count only the ones with non-normal indicators #nfac <- lavpta$nfac.nonnormal[[g]] nfac <- lavpta$nfac[[g]] lavcache[[g]]$GH <- - lav_integration_gauss_hermite_dnorm(n = nGH, mean = 0, sd = 1, - ndim = nfac, - revert = FALSE) + lav_integration_gauss_hermite(n = lavoptions$integration.ngh, + dnorm = TRUE, + mean = 0, sd = 1, + ndim = nfac) #lavcache[[g]]$DD <- lav_model_gradient_DD(lavmodel, group = g) } } @@ -749,7 +759,7 @@ #### 10.
est + lavoptim #### ############################ x <- NULL - if(do.fit && lavoptions$estimator != "none" && + if(lavoptions$do.fit && lavoptions$estimator != "none" && lavmodel@nx.free > 0L) { x <- lav_model_estimate(lavmodel = lavmodel, @@ -757,8 +767,7 @@ lavdata = lavdata, lavoptions = lavoptions, lavcache = lavcache) - lavmodel <- lav_model_set_parameters(lavmodel, x = x, - estimator = lavoptions$estimator) + lavmodel <- lav_model_set_parameters(lavmodel, x = x) # store parameters in @ParTable$est lavpartable$est <- lav_model_get_parameters(lavmodel = lavmodel, type = "user", extra = TRUE) @@ -774,19 +783,18 @@ } else { x <- numeric(0L) attr(x, "iterations") <- 0L; attr(x, "converged") <- FALSE - attr(x, "control") <- control - attr(x, "fx") <- - lav_model_objective(lavmodel = lavmodel, - lavsamplestats = lavsamplestats, lavdata = lavdata, - lavcache = lavcache, estimator = lavoptions$estimator) + attr(x, "control") <- lavoptions$control + attr(x, "fx") <- + lav_model_objective(lavmodel = lavmodel, + lavsamplestats = lavsamplestats, lavdata = lavdata, + lavcache = lavcache) lavpartable$est <- lavpartable$start } # should we fake/force convergence? (eg. to enforce the # computation of a test statistic) - if(!is.null(control$optim.force.converged) && - control$optim.force.converged) { + if(lavoptions$optim.force.converged) { attr(x, "converged") <- TRUE } @@ -840,14 +848,19 @@ VCOV <- NULL if(lavoptions$se != "none" && lavoptions$se != "external" && lavmodel@nx.free > 0L && attr(x, "converged")) { - if(verbose) cat("Computing VCOV for se =", lavoptions$se, "...") + if(lavoptions$verbose) { + cat("Computing VCOV for se =", lavoptions$se, "...") + } VCOV <- lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavpartable = lavpartable, - lavcache = lavcache) - if(verbose) cat(" done.\n") + lavcache = lavcache, + lavimplied = lavimplied) + if(lavoptions$verbose) { + cat(" done.\n") + } } # extract bootstrap results (if any) @@ -897,7 +910,9 @@ ##################### TEST <- NULL if(lavoptions$test != "none" && attr(x, "converged")) { - if(verbose) cat("Computing TEST for test =", lavoptions$test, "...") + if(lavoptions$verbose) { + cat("Computing TEST for test =", lavoptions$test, "...") + } TEST <- lav_model_test(lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, @@ -906,7 +921,9 @@ VCOV = VCOV, lavdata = lavdata, lavcache = lavcache) - if(verbose) cat(" done.\n") + if(lavoptions$verbose) { + cat(" done.\n") + } } else { TEST <- list(list(test="none", stat=NA, stat.group=rep(NA, lavdata@ngroups), df=NA, @@ -971,7 +988,7 @@ # post-fitting check - if("post" %in% check && lavTech(lavaan, "converged")) { + if("post" %in% lavoptions$check && lavTech(lavaan, "converged")) { lavInspect(lavaan, "post.check") } @@ -981,30 +998,51 @@ # cfa + sem -cfa <- sem <- function(model = NULL, data = NULL, - meanstructure = "default", - conditional.x = "default", fixed.x = "default", - orthogonal = FALSE, std.lv = FALSE, - parameterization = "default", std.ov = FALSE, - missing = "default", ordered = NULL, - sample.cov = NULL, sample.cov.rescale = "default", sample.mean = NULL, - sample.nobs = NULL, ridge = 1e-5, - group = NULL, group.label = NULL, - group.equal = "", group.partial = "", group.w.free = FALSE, - cluster = NULL, constraints = "", - estimator = "default", likelihood = "default", link = "default", - information = "default", se = "default", test = "default", - bootstrap = 1000L, mimic = "default", 
@@ -981,30 +998,51 @@

 # cfa + sem
-cfa <- sem <- function(model = NULL, data = NULL,
-                       meanstructure = "default",
-                       conditional.x = "default", fixed.x = "default",
-                       orthogonal = FALSE, std.lv = FALSE,
-                       parameterization = "default", std.ov = FALSE,
-                       missing = "default", ordered = NULL,
-                       sample.cov = NULL, sample.cov.rescale = "default", sample.mean = NULL,
-                       sample.nobs = NULL, ridge = 1e-5,
-                       group = NULL, group.label = NULL,
-                       group.equal = "", group.partial = "", group.w.free = FALSE,
-                       cluster = NULL, constraints = "",
-                       estimator = "default", likelihood = "default", link = "default",
-                       information = "default", se = "default", test = "default",
-                       bootstrap = 1000L, mimic = "default", representation = "default",
-                       do.fit = TRUE, control = list(), WLS.V = NULL, NACOV = NULL,
-                       zero.add = "default", zero.keep.margins = "default",
-                       zero.cell.warn = TRUE, start = "default",
-                       check = c("start", "post"),
-                       verbose = FALSE, warn = TRUE, debug = FALSE) {
+cfa <- sem <- function(# user-specified model: can be syntax, parameter Table
+                       model           = NULL,
+                       # data (second argument, most used)
+                       data            = NULL,
+
+                       # variable information
+                       ordered         = NULL,
+
+                       # summary data
+                       sample.cov      = NULL,
+                       sample.mean     = NULL,
+                       sample.nobs     = NULL,
+
+                       # multiple groups?
+                       group           = NULL,
+
+                       # multiple levels?
+                       cluster         = NULL,

-    mc <- match.call()
+                       # constraints
+                       constraints     = '',
+
+                       # user-specified variance matrices
+                       WLS.V           = NULL,
+                       NACOV           = NULL,
+
+                       # options (dotdotdot)
+                       ...) {
+
+    mc <- match.call(expand.dots = TRUE)
+
+    # set model.type
     mc$model.type = as.character( mc[[1L]] )
-    if(length(mc$model.type) == 3L) mc$model.type <- mc$model.type[3L]
+    if(length(mc$model.type) == 3L) {
+        mc$model.type <- mc$model.type[3L]
+    }
+
+    dotdotdot <- list(...)
+    if(!is.null(dotdotdot$std.lv)) {
+        std.lv <- dotdotdot$std.lv
+    } else {
+        std.lv <- FALSE
+    }
+
+    # default options for sem/cfa call
     mc$int.ov.free     = TRUE
     mc$int.lv.free     = FALSE
     mc$auto.fix.first  = !std.lv
@@ -1014,33 +1052,58 @@
     mc$auto.cov.y      = TRUE
     mc$auto.th         = TRUE
     mc$auto.delta      = TRUE
-    mc[[1L]] <- quote(lavaan::lavaan)

+    # call mother function
+    mc[[1L]] <- quote(lavaan::lavaan)
     eval(mc, parent.frame())
 }
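After the rewrite, cfa()/sem() keep only the most commonly used arguments as
formals; everything else (std.lv, estimator, se, missing, ...) is forwarded
through `...` to the mother function lavaan(). A usage sketch with the
HolzingerSwineford1939 data shipped with lavaan (model syntax as in lavaan's
documentation):

    library(lavaan)
    HS.model <- ' visual  =~ x1 + x2 + x3
                  textual =~ x4 + x5 + x6
                  speed   =~ x7 + x8 + x9 '
    ## std.lv and estimator are no longer named formals of cfa();
    ## they are picked up from ... and passed on to lavaan()
    fit <- cfa(HS.model, data = HolzingerSwineford1939,
               std.lv = TRUE, estimator = "MLR")
    summary(fit, fit.measures = TRUE)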

 # simple growth models
-growth <- function(model = NULL, data = NULL,
-                   conditional.x = "default", fixed.x = "default",
-                   orthogonal = FALSE, std.lv = FALSE,
-                   parameterization = "default", std.ov = FALSE,
-                   missing = "default", ordered = NULL,
-                   sample.cov = NULL, sample.cov.rescale = "default", sample.mean = NULL,
-                   sample.nobs = NULL, ridge = 1e-5,
-                   group = NULL, group.label = NULL,
-                   group.equal = "", group.partial = "", group.w.free = FALSE,
-                   cluster = NULL, constraints = "",
-                   estimator = "default", likelihood = "default", link = "default",
-                   information = "default", se = "default", test = "default",
-                   bootstrap = 1000L, mimic = "default", representation = "default",
-                   do.fit = TRUE, control = list(), WLS.V = NULL, NACOV = NULL,
-                   zero.add = "default", zero.keep.margins = "default",
-                   zero.cell.warn = TRUE, start = "default",
-                   check = c("start", "post"),
-                   verbose = FALSE, warn = TRUE, debug = FALSE) {
+growth <- function(# user-specified model: can be syntax, parameter Table
+                   model           = NULL,
+                   # data (second argument, most used)
+                   data            = NULL,
+
+                   # variable information
+                   ordered         = NULL,
+
+                   # summary data
+                   sample.cov      = NULL,
+                   sample.mean     = NULL,
+                   sample.nobs     = NULL,
+
+                   # multiple groups?
+                   group           = NULL,
+
+                   # multiple levels?
+                   cluster         = NULL,

-    mc <- match.call()
+                   # constraints
+                   constraints     = '',
+
+                   # user-specified variance matrices
+                   WLS.V           = NULL,
+                   NACOV           = NULL,
+
+                   # options (dotdotdot)
+                   ...) {
+    mc <- match.call(expand.dots = TRUE)
+
+    # set model.type
+    mc$model.type = as.character( mc[[1L]] )
+    if(length(mc$model.type) == 3L) {
+        mc$model.type <- mc$model.type[3L]
+    }
+
+    dotdotdot <- list(...)
+    if(!is.null(dotdotdot$std.lv)) {
+        std.lv <- dotdotdot$std.lv
+    } else {
+        std.lv <- FALSE
+    }
+
+    # default options for sem/cfa call
     mc$model.type      = "growth"
     mc$int.ov.free     = FALSE
     mc$int.lv.free     = TRUE
@@ -1051,7 +1114,8 @@
     mc$auto.cov.y      = TRUE
     mc$auto.th         = TRUE
     mc$auto.delta      = TRUE
-    mc[[1L]] <- quote(lavaan::lavaan)

+    # call mother function
+    mc[[1L]] <- quote(lavaan::lavaan)
     eval(mc, parent.frame())
 }
diff -Nru r-cran-lavaan-0.5.22/R/xxx_prelav.R r-cran-lavaan-0.5.23.1097/R/xxx_prelav.R
--- r-cran-lavaan-0.5.22/R/xxx_prelav.R 2016-06-13 08:38:01.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/R/xxx_prelav.R 2017-01-31 18:48:42.000000000 +0000
@@ -38,9 +38,9 @@
     lav.data <- lavData(data = object, group = group, ov.names = NAMES,
                         ordered = ordered, ov.names.x = ov.names.x,
-                        missing = missing)
+                        lavoptions = list(missing = missing))

-    lav.stats <- lav_samplestats_from_data(lavdata = lav.data,
+    lav.stats <- lav_samplestats_from_data(lavdata = lav.data,
                                            missing = missing,
                                            rescale = FALSE,
                                            estimator = "ML",
diff -Nru r-cran-lavaan-0.5.22/tests/testthat/helper-skip_level.R r-cran-lavaan-0.5.23.1097/tests/testthat/helper-skip_level.R
--- r-cran-lavaan-0.5.22/tests/testthat/helper-skip_level.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/tests/testthat/helper-skip_level.R 2016-12-30 16:38:59.000000000 +0000
@@ -0,0 +1,8 @@
+skip_level <- function(test_lvl){
+    lvl <- if (nzchar(s <- Sys.getenv("LAV_TEST_LEVEL")) &&
+               is.finite(s <- as.numeric(s))) s
+           else 1
+
+    if (test_lvl > lvl) testthat::skip(paste("test level", test_lvl, ">", lvl))
+}
+
diff -Nru r-cran-lavaan-0.5.22/tests/testthat/test-lav_matrix.R r-cran-lavaan-0.5.23.1097/tests/testthat/test-lav_matrix.R
--- r-cran-lavaan-0.5.22/tests/testthat/test-lav_matrix.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/tests/testthat/test-lav_matrix.R 2016-12-30 16:38:59.000000000 +0000
@@ -0,0 +1,28 @@
+context("lav_matrix")
+
+A <- matrix(1:16, nrow=2)
+A_sqr <- matrix(1:16, nrow=4)
+A_sym <- matrix(1:9, nrow=3); A_sym[upper.tri(A_sym)] <- t(A_sym)[upper.tri(A_sym)]
+
+test_that("lav_matrix_vech matches using lower.tri", {
+    expect_identical(
+        lav_matrix_vech(A),
+        A[lower.tri(A, diag = TRUE)]
+    )
+})
+
+test_that("lav_matrix_vech without diagonal matches using lower.tri", {
+    expect_identical(
+        lav_matrix_vech(A, diagonal = FALSE),
+        A[lower.tri(A)])
+})
+
+test_that("lav_matrix_vech and lav_matrix_vechru are identical on a symmetric matrix", {
+    for (diagonal in c(TRUE, FALSE))
+        expect_identical(lav_matrix_vech(A_sym, diagonal), lav_matrix_vechru(A_sym, diagonal))
+})
+
+test_that("lav_matrix_vechr and lav_matrix_vechu are identical on a symmetric matrix", {
+    for (diagonal in c(TRUE, FALSE))
+        expect_identical(lav_matrix_vechr(A_sym, diagonal), lav_matrix_vechu(A_sym, diagonal))
+})
\ No newline at end of file
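For context on what the new test file checks: lav_matrix_vech() stacks the
columns of the lower triangle (diagonal included by default), so on a symmetric
matrix it extracts the non-redundant elements, and the row-wise upper-triangle
variant lav_matrix_vechru() visits the same elements in the same order. A small
illustration with arbitrarily chosen values:

    library(lavaan)
    S <- matrix(c(4, 2, 1,
                  2, 3, 0,
                  1, 0, 5), nrow = 3)                  # symmetric
    lav_matrix_vech(S)                                 # 4 2 1 3 0 5
    S[lower.tri(S, diag = TRUE)]                       # same selection
    identical(lav_matrix_vech(S), lav_matrix_vechru(S))  # TRUE on symmetric input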
diff -Nru r-cran-lavaan-0.5.22/tests/testthat/test-lav_mvnorm.R r-cran-lavaan-0.5.23.1097/tests/testthat/test-lav_mvnorm.R
--- r-cran-lavaan-0.5.22/tests/testthat/test-lav_mvnorm.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/tests/testthat/test-lav_mvnorm.R 2017-01-29 20:40:15.000000000 +0000
@@ -0,0 +1,143 @@
+### Terrence D. Jorgensen
+### Last updated: 25 January 2017
+### test lav_mvnorm_* functions
+
+context("lav_mvnorm_*")
+
+
+## complete data
+varnames <- paste("x", 1:9, sep = "")
+H9 <- HolzingerSwineford1939[ , varnames]
+## impose missingness
+H9miss <- H9
+H9miss$x5 <- ifelse(H9miss$x1 <= quantile(H9miss$x1, .3), NA, H9miss$x5)
+H9miss$x9 <- ifelse(H9miss$x4 <= quantile(H9miss$x4, .3), NA, H9miss$x9)
+## fit model to complete and incomplete data
+HS.model <- '
+  visual  =~ x1 + x2 + x3
+  textual =~ x4 + x5 + x6
+  speed   =~ x7 + x8 + x9
+'
+
+
+#########################
+## Test Complete cases ##
+#########################
+
+cfit <- cfa(HS.model, data = H9, meanstructure = TRUE)
+
+## save summary statistics
+cM <- lavInspect(cfit, "sampstat", add.class = FALSE)$mean # matches round(colMeans(H9), 3)
+cS <- lavInspect(cfit, "sampstat", add.class = FALSE)$cov  # matches round(cov(H9)*300/301, 3)
+## model-implied moments
+cMu <- lavInspect(cfit, "mean.ov", add.class = FALSE)
+cSigma <- lavInspect(cfit, "cov.ov", add.class = FALSE)
+
+
+## sum casewise log-likelihoods under saturated model
+cLL1 <- fitMeasures(cfit)[["unrestricted.logl"]]
+cLL2 <- sum(mnormt::dmnorm(H9, mean = cM, varcov = cS, log = TRUE))
+#cLL3 <- sum(mvtnorm::dmvnorm(H9, mean = cM, sigma = cS, log = TRUE))
+## functions of actual interest
+cLL4 <- sum(lav_mvnorm_dmvnorm(Y = as.matrix(H9), Mu = cM, Sigma = cS))
+cLL5 <- lav_mvnorm_h1_loglik_data(as.matrix(H9), casewise = FALSE)
+cLL6 <- sum(lav_mvnorm_h1_loglik_data(as.matrix(H9), casewise = TRUE))
+
+test_that("6 saturated log-likelihoods match for complete data", {
+  expect_equal(cLL1, cLL2)
+# expect_equal(cLL1, cLL3)
+  expect_equal(cLL1, cLL4)
+  expect_equal(cLL1, cLL5)
+  expect_equal(cLL1, cLL6)
+})
+rm(cLL1, cLL2,
+   #cLL3,
+   cLL4, cLL5, cLL6)
+
+
+## sum casewise log-likelihoods under target model
+cLL1 <- fitMeasures(cfit)[["logl"]]
+cLL2 <- sum(mnormt::dmnorm(H9, mean = cMu, varcov = cSigma, log = TRUE))
+#cLL3 <- sum(mvtnorm::dmvnorm(H9, mean = cMu, sigma = cSigma, log = TRUE))
+cLL4 <- sum(lav_mvnorm_dmvnorm(Y = as.matrix(H9), Mu = cMu, Sigma = cSigma))
+cLL5 <- lav_mvnorm_loglik_samplestats(sample.mean = cM, sample.cov = cS,
+                                      sample.nobs = nobs(cfit),
+                                      Mu = cMu, Sigma = cSigma)
+
+test_that("5 target-model log-likelihoods match for complete data", {
+  expect_equal(cLL1, cLL2)
+# expect_equal(cLL1, cLL3)
+  expect_equal(cLL1, cLL4)
+  expect_equal(cLL1, cLL5)
+})
+rm(cLL1, cLL2,
+   #cLL3,
+   cLL4, cLL5)
+
+
+##################
+## Missing Data ##
+##################
+
+mfit <- cfa(HS.model, data = H9miss, meanstructure = TRUE, missing = "fiml")
+
+## list per missing-data pattern
+lavInspect(mfit, "coverage")
+pattern <- lavInspect(mfit, "pattern")
+H9logic <- !is.na(H9miss)
+
+## indicators for which pattern each row belongs to # lav_data_missing_patterns(H9miss)$case.idx
+indPatterns <- sapply(1:4, function(pp) {
+  apply(H9logic, 1, function(x) all(x == pattern[pp, ]))
+})
+all(rowSums(indPatterns) == 1) # check exactly 1 pattern per person
+
+## lists of sample stats per pattern
+# (mN <- colSums(indPatterns)) # N per pattern
+# mM <- lapply(1:4, function(pp) {
+#   colMeans(H9miss[indPatterns[,pp], varnames[ pattern[pp,] ] ])
+# })
+# mS <- lapply(1:4, function(pp) {
+#   cov(H9miss[indPatterns[,pp], varnames[pattern[pp,]]]) * (mN[pp] - 1) / mN[pp]
+# })
+## lists of model-implied moments
+mMu <- lavInspect(mfit, "mean.ov", add.class = FALSE)
+mSigma <- lavInspect(mfit, "cov.ov", add.class = FALSE)
+
+
+
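The quantity compared throughout this file is the casewise multivariate-normal
log-likelihood: under FIML, each row contributes the normal density of its
observed subvector, using the matching elements of Mu and Sigma. A bare-bones
re-implementation for illustration only (not lavaan's internal code; assumes
each observed submatrix of Sigma is positive definite):

    loglik_case <- function(y, Mu, Sigma) {
        obs <- !is.na(y)                        # observed entries of this row
        yo <- y[obs]; mo <- Mu[obs]
        so <- Sigma[obs, obs, drop = FALSE]
        p  <- sum(obs)
        -0.5 * (p * log(2 * pi) +
                as.numeric(determinant(so, logarithm = TRUE)$modulus) +
                drop(t(yo - mo) %*% solve(so) %*% (yo - mo)))
    }
    ## summing over the rows of H9miss should reproduce mLL1 below
    ## (up to numerical error):
    # sum(apply(as.matrix(H9miss), 1, loglik_case, Mu = mMu, Sigma = mSigma))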
+## sum casewise log-likelihoods under target model for each pattern
+mLL1 <- fitMeasures(mfit)[["logl"]]
+mLL2 <- sum(sapply(1:4, function(pp) {
+  sum(apply(H9miss[indPatterns[,pp], varnames[pattern[pp,]]], 1,
+            mnormt::dmnorm, mean = mMu[varnames[pattern[pp,]]],
+            varcov = mSigma[varnames[pattern[pp,]], varnames[pattern[pp,]]], log = TRUE))
+}))
+#mLL3 <- sum(sapply(1:4, function(pp) {
+#  sum(apply(H9miss[indPatterns[,pp], varnames[pattern[pp,]]], 1,
+#            mvtnorm::dmvnorm, mean = mMu[varnames[pattern[pp,]]],
+#            sigma = mSigma[varnames[pattern[pp,]], varnames[pattern[pp,]]], log = TRUE))
+#}))
+## functions of actual interest
+mLL4 <- lav_mvnorm_missing_loglik_data(H9miss, mMu, mSigma, pattern = FALSE)
+mLL5 <- lav_mvnorm_missing_loglik_data(H9miss, mMu, mSigma, pattern = TRUE)
+## from sample stats
+mLL6 <- lav_mvnorm_missing_loglik_samplestats(mfit@SampleStats@missing[[1]], mMu, mSigma)
+
+test_that("6 target-model log-likelihoods match for missing data", {
+  expect_equal(mLL1, mLL2)
+# expect_equal(mLL1, mLL3)
+  expect_equal(mLL1, mLL4)
+  expect_equal(mLL1, mLL5)
+  expect_equal(mLL1, mLL6)
+})
+rm(mLL1, mLL2,
+   #mLL3,
+   mLL4, mLL5, mLL6)
+
+
+#########################
+## run tests in this file
+# test_file("tests/testthat/test-lav_mvnorm.R")
+
diff -Nru r-cran-lavaan-0.5.22/tests/testthat/test-skip_example.R r-cran-lavaan-0.5.23.1097/tests/testthat/test-skip_example.R
--- r-cran-lavaan-0.5.22/tests/testthat/test-skip_example.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/tests/testthat/test-skip_example.R 2017-01-29 20:34:39.000000000 +0000
@@ -0,0 +1,5 @@
+context("skip only")
+test_that("skip test", {
+  skip_level(2)
+  expect_identical(TRUE, FALSE)
+})
diff -Nru r-cran-lavaan-0.5.22/tests/testthat.R r-cran-lavaan-0.5.23.1097/tests/testthat.R
--- r-cran-lavaan-0.5.22/tests/testthat.R 1970-01-01 00:00:00.000000000 +0000
+++ r-cran-lavaan-0.5.23.1097/tests/testthat.R 2016-12-30 16:38:59.000000000 +0000
@@ -0,0 +1,4 @@
+library(testthat)
+library(lavaan)
+# run tests
+test_check("lavaan")
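Taken together, helper-skip_level.R and test-skip_example.R gate expensive tests
behind the LAV_TEST_LEVEL environment variable; when it is unset the level
defaults to 1, so the deliberately failing example above is skipped. To opt in
from an R session (run where the package's test files are available):

    Sys.setenv(LAV_TEST_LEVEL = "2")   # enable tests up to level 2
    library(testthat); library(lavaan)
    test_check("lavaan")               # test-skip_example.R now runs (and fails by design)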