Becker-Aloe-Cheung-2019.Rmd
We load the necessary packages as well as the data set for the example. Because this example uses only complete data, we remove the two studies with missing correlations
(i.e., Study 6 and Study 17).
library(metaRmat)
library(metafor)
library(corpcor)
library(Matrix)
library(matrixcalc)
becker09 <- na.omit(becker09) # omit studies with NA
Next, we create a list of study-specific correlation matrices, which will be used as input to the other functions.
becker09_list <- df_to_corr(becker09,
variables = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'),
ID = 'ID')
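Each element of the resulting list is a study-specific 4 x 4 correlation matrix assembled from the six correlations listed above. As a quick check (a sketch; positional indexing is used because the exact element names depend on df_to_corr):
length(becker09_list) # 8 complete studies, one correlation matrix per study
becker09_list[[1]]    # the 4 x 4 correlation matrix for the first study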
There are currently three options for the variance-covariance matrix of the correlations (i.e., simple, average, and weighted); for this example we selected the weighted option.
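All three options build on the Olkin and Siotani (1976) large-sample results for correlations computed on the same sample. For variables \(s, t, u, v\) and sample size \(n\), these take the familiar form
\[
n\,\mathrm{Var}(r_{st}) \approx \left(1-\rho_{st}^{2}\right)^{2},
\]
\[
\begin{aligned}
n\,\mathrm{Cov}(r_{st}, r_{uv}) \approx{}& \tfrac{1}{2}\,\rho_{st}\rho_{uv}\left(\rho_{su}^{2}+\rho_{sv}^{2}+\rho_{tu}^{2}+\rho_{tv}^{2}\right) + \rho_{su}\rho_{tv}+\rho_{sv}\rho_{tu} \\
&- \left(\rho_{st}\rho_{su}\rho_{sv}+\rho_{ts}\rho_{tu}\rho_{tv}+\rho_{us}\rho_{ut}\rho_{uv}+\rho_{vs}\rho_{vt}\rho_{vu}\right).
\end{aligned}
\]
Roughly speaking, the type argument controls which correlation estimates are substituted for the \(\rho\)'s: 'simple' uses each study's own correlations, while 'average' and 'weighted' use the unweighted and sample-size-weighted mean correlations across studies, respectively.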
#olkin_siotani(becker09_list, becker09$N, type = 'simple')
#olkin_siotani(becker09_list, becker09$N, type = 'average')
olkin_siotani(becker09_list, becker09$N, type = 'weighted')
#> [[1]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.0068964508 0.0034592598 -0.0024249951 -0.0007709388 0.0019806619
#> [2,] 0.0034592598 0.0066061710 -0.0022932962 -0.0002788858 0.0008637677
#> [3,] -0.0024249951 -0.0022932962 0.0053155444 0.0001634926 -0.0001362802
#> [4,] -0.0007709388 -0.0002788858 0.0001634926 0.0037574849 -0.0013337746
#> [5,] 0.0019806619 0.0008637677 -0.0001362802 -0.0013337746 0.0048166300
#> [6,] 0.0010242015 0.0018602041 -0.0005187913 -0.0013441468 0.0021616446
#> [,6]
#> [1,] 0.0010242015
#> [2,] 0.0018602041
#> [3,] -0.0005187913
#> [4,] -0.0013441468
#> [5,] 0.0021616446
#> [6,] 0.0048308442
#>
#> [[2]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.026467460 0.013276078 -0.0093067379 -0.0029587381 0.0076014592
#> [2,] 0.013276078 0.025353413 -0.0088012989 -0.0010703186 0.0033150004
#> [3,] -0.009306738 -0.008801299 0.0204001974 0.0006274581 -0.0005230214
#> [4,] -0.002958738 -0.001070319 0.0006274581 0.0144206177 -0.0051188104
#> [5,] 0.007601459 0.003315000 -0.0005230214 -0.0051188104 0.0184854450
#> [6,] 0.003930719 0.007139162 -0.0019910370 -0.0051586176 0.0082960414
#> [,6]
#> [1,] 0.003930719
#> [2,] 0.007139162
#> [3,] -0.001991037
#> [4,] -0.005158618
#> [5,] 0.008296041
#> [6,] 0.018539997
#>
#> [[3]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.069949715 0.035086777 -0.024596379 -0.007819522 0.020089571
#> [2,] 0.035086777 0.067005449 -0.023260576 -0.002828699 0.008761072
#> [3,] -0.024596379 -0.023260576 0.053914807 0.001658282 -0.001382271
#> [4,] -0.007819522 -0.002828699 0.001658282 0.038111633 -0.013528285
#> [5,] 0.020089571 0.008761072 -0.001382271 -0.013528285 0.048854390
#> [6,] 0.010388330 0.018867784 -0.005262026 -0.013633489 0.021925252
#> [,6]
#> [1,] 0.010388330
#> [2,] 0.018867784
#> [3,] -0.005262026
#> [4,] -0.013633489
#> [5,] 0.021925252
#> [6,] 0.048998563
#>
#> [[4]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.009792960 0.0049121488 -0.0034434930 -0.0010947331 0.0028125399
#> [2,] 0.004912149 0.0093807628 -0.0032564806 -0.0003960179 0.0012265501
#> [3,] -0.003443493 -0.0032564806 0.0075480730 0.0002321595 -0.0001935179
#> [4,] -0.001094733 -0.0003960179 0.0002321595 0.0053356286 -0.0018939599
#> [5,] 0.002812540 0.0012265501 -0.0001935179 -0.0018939599 0.0068396147
#> [6,] 0.001454366 0.0026414898 -0.0007366837 -0.0019086885 0.0030695353
#> [,6]
#> [1,] 0.0014543662
#> [2,] 0.0026414898
#> [3,] -0.0007366837
#> [4,] -0.0019086885
#> [5,] 0.0030695353
#> [6,] 0.0068597988
#>
#> [[5]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.019201883 0.0096316644 -0.0067519471 -0.0021465355 0.0055147841
#> [2,] 0.009631664 0.0183936526 -0.0063852560 -0.0007765056 0.0024050003
#> [3,] -0.006751947 -0.0063852560 0.0148001432 0.0004552147 -0.0003794469
#> [4,] -0.002146536 -0.0007765056 0.0004552147 0.0104620168 -0.0037136468
#> [5,] 0.005514784 0.0024050003 -0.0003794469 -0.0037136468 0.0134110091
#> [6,] 0.002851698 0.0051793917 -0.0014444778 -0.0037425265 0.0060186967
#> [,6]
#> [1,] 0.002851698
#> [2,] 0.005179392
#> [3,] -0.001444478
#> [4,] -0.003742526
#> [5,] 0.006018697
#> [6,] 0.013450586
#>
#> [[6]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.0076507501 0.0038376163 -0.0026902289 -0.0008552602 0.0021972968
#> [2,] 0.0038376163 0.0073287209 -0.0025441255 -0.0003093890 0.0009582423
#> [3,] -0.0026902289 -0.0025441255 0.0058969321 0.0001813746 -0.0001511859
#> [4,] -0.0008552602 -0.0003093890 0.0001813746 0.0041684598 -0.0014796561
#> [5,] 0.0021972968 0.0009582423 -0.0001511859 -0.0014796561 0.0053434489
#> [6,] 0.0011362236 0.0020636639 -0.0005755341 -0.0014911629 0.0023980745
#> [,6]
#> [1,] 0.0011362236
#> [2,] 0.0020636639
#> [3,] -0.0005755341
#> [4,] -0.0014911629
#> [5,] 0.0023980745
#> [6,] 0.0053592178
#>
#> [[7]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.013989943 0.0070173555 -0.0049192757 -0.0015639044 0.0040179142
#> [2,] 0.007017355 0.0134010897 -0.0046521151 -0.0005657398 0.0017522145
#> [3,] -0.004919276 -0.0046521151 0.0107829615 0.0003316564 -0.0002764542
#> [4,] -0.001563904 -0.0005657398 0.0003316564 0.0076223265 -0.0027056569
#> [5,] 0.004017914 0.0017522145 -0.0002764542 -0.0027056569 0.0097708781
#> [6,] 0.002077666 0.0037735568 -0.0010524053 -0.0027266979 0.0043850504
#> [,6]
#> [1,] 0.002077666
#> [2,] 0.003773557
#> [3,] -0.001052405
#> [4,] -0.002726698
#> [5,] 0.004385050
#> [6,] 0.009799713
#>
#> [[8]]
#> [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.032643200 0.016373829 -0.0114783100 -0.003649110 0.0093751330
#> [2,] 0.016373829 0.031269209 -0.0108549353 -0.001320060 0.0040885004
#> [3,] -0.011478310 -0.010854935 0.0251602435 0.000773865 -0.0006450598
#> [4,] -0.003649110 -0.001320060 0.0007738650 0.017785429 -0.0063131995
#> [5,] 0.009375133 0.004088500 -0.0006450598 -0.006313200 0.0227987155
#> [6,] 0.004847887 0.008804966 -0.0024556123 -0.006362295 0.0102317844
#> [,6]
#> [1,] 0.004847887
#> [2,] 0.008804966
#> [3,] -0.002455612
#> [4,] -0.006362295
#> [5,] 0.010231784
#> [6,] 0.022865996
The function below creates and organizes the elements that are then fitted internally with the metafor
package. Below, we fit fixed- and random-effects models and extract some additional information from objects that are not directly returned by the functions. First, we examine the results under the fixed-effect model.
input_metafor <- prep_data(becker09, becker09$N, type = 'weighted', missing = FALSE,
variable_names = c('Cognitive_Performance', 'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'),
ID = 'ID')
fixed_model <- fit_model(data = input_metafor, effect_size = 'yi',
var_cor = 'V', moderators = ~ -1 + factor(outcome),
random_params = NULL)
#> Warning: Model does not contain an '~ inner | outer' term, so 'struct' argument
#> is disregarded.
# model_out_fixed (used below) extracts some of this information, but we also extract a few elements directly here
round(fixed_model$b, 3) # fixed effect estimates
#> [,1]
#> factor(outcome)1 -0.102
#> factor(outcome)2 -0.177
#> factor(outcome)3 0.362
#> factor(outcome)4 0.519
#> factor(outcome)5 -0.416
#> factor(outcome)6 -0.414
round(fixed_model$vb, 5) # fixed effect COV
#> factor(outcome)1 factor(outcome)2 factor(outcome)3
#> factor(outcome)1 0.00171 0.00086 -0.00060
#> factor(outcome)2 0.00086 0.00164 -0.00057
#> factor(outcome)3 -0.00060 -0.00057 0.00132
#> factor(outcome)4 -0.00019 -0.00007 0.00004
#> factor(outcome)5 0.00049 0.00021 -0.00003
#> factor(outcome)6 0.00025 0.00046 -0.00013
#> factor(outcome)4 factor(outcome)5 factor(outcome)6
#> factor(outcome)1 -0.00019 0.00049 0.00025
#> factor(outcome)2 -0.00007 0.00021 0.00046
#> factor(outcome)3 0.00004 -0.00003 -0.00013
#> factor(outcome)4 0.00093 -0.00033 -0.00033
#> factor(outcome)5 -0.00033 0.00120 0.00054
#> factor(outcome)6 -0.00033 0.00054 0.00120
fixed_model # for the Q statistics and degrees of freedom
#>
#> Multivariate Meta-Analysis Model (k = 48; method: REML)
#>
#> Variance Components: none
#>
#> Test for Residual Heterogeneity:
#> QE(df = 42) = 176.6654, p-val < .0001
#>
#> Test of Moderators (coefficients 1:6):
#> QM(df = 6) = 485.0641, p-val < .0001
#>
#> Model Results:
#>
#> estimate se zval pval ci.lb ci.ub
#> factor(outcome)1 -0.1020 0.0414 -2.4654 0.0137 -0.1831 -0.0209 *
#> factor(outcome)2 -0.1774 0.0405 -4.3796 <.0001 -0.2567 -0.0980 ***
#> factor(outcome)3 0.3622 0.0363 9.9713 <.0001 0.2910 0.4334 ***
#> factor(outcome)4 0.5192 0.0305 16.9989 <.0001 0.4593 0.5790 ***
#> factor(outcome)5 -0.4159 0.0346 -12.0276 <.0001 -0.4837 -0.3481 ***
#> factor(outcome)6 -0.4144 0.0346 -11.9675 <.0001 -0.4823 -0.3466 ***
#>
#> ---
#> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
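Judging from the output above (and the earlier 'struct' warning), fit_model is a wrapper around metafor::rma.mv(). A minimal sketch of a roughly equivalent direct call, assuming input_metafor$data stacks the correlations as yi with outcome and study indicators and input_metafor$V holds the block-diagonal sampling covariance matrix (these element names are illustrative, not the package's documented structure):
# roughly equivalent direct metafor call -- element names are assumptions
fixed_direct <- metafor::rma.mv(yi, V = input_metafor$V,
                                mods = ~ -1 + factor(outcome),
                                data = input_metafor$data)
When random_params is supplied, it appears to be passed on as rma.mv's random argument with an unstructured (struct = "UN") covariance for the outcome-level random effects, which would also explain why rma.mv warns that 'struct' is disregarded when random_params = NULL.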
Now we fit a random-effects model and extract some objects from this output.
random_model <- fit_model(data = input_metafor, effect_size = 'yi',
var_cor = 'V', moderators = ~ -1 + factor(outcome),
random_params = ~ factor(outcome) | factor(study))
# model_out_random (used below) extracts some of this information, but we also extract a few elements directly here
round(random_model$tau2, 3) # between-studies variances
#> [1] 0.132 0.044 0.042 0.001 0.006 0.004
round(random_model$b, 3) # random-effects estimates
#> [,1]
#> factor(outcome)1 -0.098
#> factor(outcome)2 -0.176
#> factor(outcome)3 0.319
#> factor(outcome)4 0.527
#> factor(outcome)5 -0.418
#> factor(outcome)6 -0.401
round(random_model$vb, 5) # random-effects COV
#> factor(outcome)1 factor(outcome)2 factor(outcome)3
#> factor(outcome)1 0.01898 0.01084 -0.00476
#> factor(outcome)2 0.01084 0.00743 -0.00282
#> factor(outcome)3 -0.00476 -0.00282 0.00707
#> factor(outcome)4 0.00022 0.00014 -0.00095
#> factor(outcome)5 -0.00184 -0.00104 0.00123
#> factor(outcome)6 0.00056 0.00058 -0.00180
#> factor(outcome)4 factor(outcome)5 factor(outcome)6
#> factor(outcome)1 0.00022 -0.00184 0.00056
#> factor(outcome)2 0.00014 -0.00104 0.00058
#> factor(outcome)3 -0.00095 0.00123 -0.00180
#> factor(outcome)4 0.00111 -0.00053 -0.00002
#> factor(outcome)5 -0.00053 0.00211 0.00020
#> factor(outcome)6 -0.00002 0.00020 0.00175
These are the results under the random-effects model. This is what the typical output of the extract_model
function looks like for the fixed-effect model.
model_out_fixed <- extract_model(fixed_model,
variable_names = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'))
model_out_fixed
#> $beta_matrix
#> Performance Cognitive Somatic Selfconfidence
#> Performance 1.0000000 -0.1020105 -0.1773601 0.3622203
#> Cognitive -0.1020105 1.0000000 0.5191783 -0.4159091
#> Somatic -0.1773601 0.5191783 1.0000000 -0.4144406
#> Selfconfidence 0.3622203 -0.4159091 -0.4144406 1.0000000
#>
#> $var_matrix
#> factor(outcome)1 factor(outcome)2 factor(outcome)3
#> factor(outcome)1 0.0017120560 8.587673e-04 -6.020093e-04
#> factor(outcome)2 0.0008587673 1.639993e-03 -5.693148e-04
#> factor(outcome)3 -0.0006020093 -5.693148e-04 1.319593e-03
#> factor(outcome)4 -0.0001913869 -6.923389e-05 4.058732e-05
#> factor(outcome)5 0.0004917028 2.144318e-04 -3.383181e-05
#> factor(outcome)6 0.0002542598 4.617989e-04 -1.287909e-04
#> factor(outcome)4 factor(outcome)5 factor(outcome)6
#> factor(outcome)1 -1.913869e-04 4.917028e-04 0.0002542598
#> factor(outcome)2 -6.923389e-05 2.144318e-04 0.0004617989
#> factor(outcome)3 4.058732e-05 -3.383181e-05 -0.0001287909
#> factor(outcome)4 9.328022e-04 -3.311119e-04 -0.0003336868
#> factor(outcome)5 -3.311119e-04 1.195737e-03 0.0005366320
#> factor(outcome)6 -3.336868e-04 5.366320e-04 0.0011992655
#>
#> $tau
#> [1] 0
#>
#> $rho
#> [1] 0
For the random-effects model, we extract several more objects from the extract_model
function. We can also rebuild the between-studies covariance matrix from \(\hat{\tau}^2\) and the estimated random-effects correlations, and convert it to a correlation matrix.
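In matrix form, the code below reconstructs the between-studies covariance matrix as
\[
\hat{T} \;=\; \mathrm{diag}(\hat{\tau})\,\hat{P}\,\mathrm{diag}(\hat{\tau}),
\]
where \(\hat{\tau}\) contains the square roots of the \(\hat{\tau}^2\) estimates and \(\hat{P}\) is the symmetric matrix assembled from the estimated random-effects correlations (with ones on the diagonal); applying cov2cor() to \(\hat{T}\) then returns \(\hat{P}\) again, up to rounding.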
model_out_random <- extract_model(random_model,
variable_names = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'))
Rho <- model_out_random$rho
Tau2 <- model_out_random$tau
#------------------
# tau matrix
#------------------
rho_t <- vec2sm(Rho, diag = FALSE)   # expand the vector of correlations into a symmetric matrix
diag(rho_t) <- 1                     # set the diagonal to 1
tdiag <- diag(sqrt(Tau2))            # diagonal matrix of between-studies SDs
taumat <- tdiag %*% rho_t %*% tdiag  # between-studies covariance (tau) matrix
round(taumat, 3)
#> [,1] [,2] [,3] [,4] [,5] [,6]
#> [1,] 0.132 0.076 -0.031 -0.005 0.007 0.005
#> [2,] 0.076 0.044 -0.042 -0.005 -0.010 0.008
#> [3,] -0.031 -0.042 0.042 -0.004 0.002 0.001
#> [4,] -0.005 -0.005 -0.004 0.001 -0.003 0.002
#> [5,] 0.007 -0.010 0.002 -0.003 0.006 -0.002
#> [6,] 0.005 0.008 0.001 0.002 -0.002 0.004
round(cov2cor(taumat),2)
#> [,1] [,2] [,3] [,4] [,5] [,6]
#> [1,] 1.00 1.00 -0.42 -0.39 0.23 0.20
#> [2,] 1.00 1.00 -0.98 -0.63 -0.59 0.59
#> [3,] -0.42 -0.98 1.00 -0.52 0.10 0.06
#> [4,] -0.39 -0.63 -0.52 1.00 -0.94 0.99
#> [5,] 0.23 -0.59 0.10 -0.94 1.00 -0.49
#> [6,] 0.20 0.59 0.06 0.99 -0.49 1.00
Now we are ready to input the average correlation matrix and its variance-covariance matrix into the lavaan package and our own function, which estimates the standard errors appropriately via the multivariate delta method.
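In general terms, if the path coefficients are a smooth function \(\hat{b} = g(\bar{r})\) of the synthesized correlations \(\bar{r}\), the multivariate delta method approximates their covariance matrix by
\[
\widehat{\mathrm{Cov}}(\hat{b}) \;\approx\; G\,\widehat{\mathrm{Cov}}(\bar{r})\,G^{\top},
\qquad G = \left.\frac{\partial g(r)}{\partial r^{\top}}\right|_{r=\bar{r}},
\]
with \(\widehat{\mathrm{Cov}}(\bar{r})\) taken from the multivariate meta-analysis above.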
model <- "## Regression paths
Performance ~ Cognitive + Somatic + Selfconfidence
Selfconfidence ~ Cognitive + Somatic
"
path_output <- path_model(data = model_out_random, model = model,
num_obs = sum(becker09$N))
summary(path_output)
#> Average Correlation Matrix:
#> Performance Cognitive Somatic Selfconfidence
#> Performance 1.00000000 -0.09773331 -0.1755029 0.3186775
#> Cognitive -0.09773331 1.00000000 0.5271873 -0.4175596
#> Somatic -0.17550292 0.52718732 1.0000000 -0.4006848
#> Selfconfidence 0.31867753 -0.41755963 -0.4006848 1.0000000
#>
#>
#> Model Fitted:
#> ## Regression paths
#> Performance ~ Cognitive + Somatic + Selfconfidence
#> Selfconfidence ~ Cognitive + Somatic
#>
#>
#> Variance Estimates:
#>
#>
#> Covariance Estimates:
#>
#>
#> Fixed Effects:
#> predictor outcome estimate
#> Cognitive -> Performance Cognitive Performance 0.08316156
#> Somatic -> Performance Somatic Performance -0.09261004
#> Selfconfidence -> Performance Selfconfidence Performance 0.31629500
#> Cognitive -> Selfconfidence Cognitive Selfconfidence -0.28573778
#> Somatic -> Selfconfidence Somatic Selfconfidence -0.25004745
#> standard_errors test_statistic p_value
#> Cognitive -> Performance 0.15426969 0.5390661 5.898413e-01
#> Somatic -> Performance 0.06352263 -1.4579062 1.448664e-01
#> Selfconfidence -> Performance 0.10216547 3.0959091 1.962105e-03
#> Cognitive -> Selfconfidence 0.03848324 -7.4249929 1.127860e-13
#> Somatic -> Selfconfidence 0.05226122 -4.7845698 1.713539e-06
We now subset the data to obtain results only for the studies that reported on Team sports.
becker09_T <- subset(becker09, becker09$Team == "T")
becker09_list2 <- df_to_corr(becker09_T,
variables = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'),
ID = 'ID')
input_metafor2 <- prep_data(becker09_T, becker09_T$N, type = 'weighted', missing = FALSE,
variable_names = c('Cognitive_Performance', 'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'),
ID = 'ID')
random_model2 <- fit_model(data = input_metafor2, effect_size = 'yi',
var_cor = 'V', moderators = ~ -1 + factor(outcome),
random_params = ~ factor(outcome) | factor(study))
model_out_random2 <- extract_model(random_model2,
variable_names = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'))
model <- "## Regression paths
Performance ~ Cognitive + Somatic + Selfconfidence
Selfconfidence ~ Cognitive + Somatic
"
path_output2 <- path_model(data = model_out_random2, model = model,
num_obs = sum(becker09_T$N))
summary(path_output2)
#> Average Correlation Matrix:
#> Performance Cognitive Somatic Selfconfidence
#> Performance 1.0000000 -0.1245062 -0.1852845 0.2328835
#> Cognitive -0.1245062 1.0000000 0.5825297 -0.3927942
#> Somatic -0.1852845 0.5825297 1.0000000 -0.3533525
#> Selfconfidence 0.2328835 -0.3927942 -0.3533525 1.0000000
#>
#>
#> Model Fitted:
#> ## Regression paths
#> Performance ~ Cognitive + Somatic + Selfconfidence
#> Selfconfidence ~ Cognitive + Somatic
#>
#>
#> Variance Estimates:
#>
#>
#> Covariance Estimates:
#>
#>
#> Fixed Effects:
#> predictor outcome estimate
#> Cognitive -> Performance Cognitive Performance 0.03091857
#> Somatic -> Performance Somatic Performance -0.13336598
#> Selfconfidence -> Performance Selfconfidence Performance 0.19790297
#> Cognitive -> Selfconfidence Cognitive Selfconfidence -0.28298386
#> Somatic -> Selfconfidence Somatic Selfconfidence -0.18850603
#> standard_errors test_statistic p_value
#> Cognitive -> Performance 0.25963239 0.119086 9.052073e-01
#> Somatic -> Performance 0.08357853 -1.595697 1.105565e-01
#> Selfconfidence -> Performance 0.16481574 1.200753 2.298471e-01
#> Cognitive -> Selfconfidence 0.06463990 -4.377851 1.198553e-05
#> Somatic -> Selfconfidence 0.12485600 -1.509787 1.310977e-01
Similarly, we now subset the data to obtain results only for the studies that reported on Individual sports.
becker09_I <- subset(becker09, becker09$Team == "I")
becker09_list3 <- df_to_corr(becker09_I,
variables = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'),
ID = 'ID')
input_metafor3 <- prep_data(becker09_I, becker09_I$N, type = 'weighted', missing = FALSE,
variable_names = c('Cognitive_Performance', 'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'),
ID = 'ID')
random_model3 <- fit_model(data = input_metafor3, effect_size = 'yi',
var_cor = 'V', moderators = ~ -1 + factor(outcome),
random_params = ~ factor(outcome) | factor(study))
model_out_random3 <- extract_model(random_model3,
variable_names = c('Cognitive_Performance',
'Somatic_Performance',
'Selfconfidence_Performance',
'Somatic_Cognitive',
'Selfconfidence_Cognitive',
'Selfconfidence_Somatic'))
model <- "## Regression paths
Performance ~ Cognitive + Somatic + Selfconfidence
Selfconfidence ~ Cognitive + Somatic"
path_output3 <- path_model(data = model_out_random3, model = model,
num_obs = sum(becker09_I$N))
summary(path_output3)
#> Average Correlation Matrix:
#> Performance Cognitive Somatic Selfconfidence
#> Performance 1.00000000 -0.08366167 -0.1828913 0.3375315
#> Cognitive -0.08366167 1.00000000 0.4904676 -0.4656700
#> Somatic -0.18289134 0.49046760 1.0000000 -0.4976074
#> Selfconfidence 0.33753152 -0.46566996 -0.4976074 1.0000000
#>
#>
#> Model Fitted:
#> ## Regression paths
#> Performance ~ Cognitive + Somatic + Selfconfidence
#> Selfconfidence ~ Cognitive + Somatic
#>
#> Variance Estimates:
#>
#>
#> Covariance Estimates:
#>
#>
#> Fixed Effects:
#> predictor outcome estimate
#> Cognitive -> Performance Cognitive Performance 0.11330418
#> Somatic -> Performance Somatic Performance -0.05881317
#> Selfconfidence -> Performance Selfconfidence Performance 0.36102801
#> Cognitive -> Selfconfidence Cognitive Selfconfidence -0.29180609
#> Somatic -> Selfconfidence Somatic Selfconfidence -0.35448594
#> standard_errors test_statistic p_value
#> Cognitive -> Performance 0.23312948 0.4860139 6.269573e-01
#> Somatic -> Performance 0.13567901 -0.4334729 6.646713e-01
#> Selfconfidence -> Performance 0.14685898 2.4583311 1.395844e-02
#> Cognitive -> Selfconfidence 0.06212761 -4.6968827 2.641621e-06
#> Somatic -> Selfconfidence 0.08749570 -4.0514672 5.089747e-05
Here we compute the synthetic partial correlation matrix from the average correlation matrix.
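corpcor's cor2pcor() obtains the partial correlations from the (pseudo)inverse of the correlation matrix: writing \(P = R^{-1}\) with elements \(p_{ij}\), the partial correlation between variables \(i\) and \(j\) given all remaining variables is
\[
\rho_{ij\cdot\mathrm{rest}} \;=\; -\,\frac{p_{ij}}{\sqrt{p_{ii}\,p_{jj}}}.
\]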
# partial corr matrix
round(cor2pcor(model_out_random$beta_matrix),3)
#> [,1] [,2] [,3] [,4]
#> [1,] 1.000 0.072 -0.081 0.284
#> [2,] 0.072 1.000 0.436 -0.274
#> [3,] -0.081 0.436 1.000 -0.201
#> [4,] 0.284 -0.274 -0.201 1.000
# var-cov matrix of partials
Psy <- random_model$vb
round(var_path(model_out_random$beta_matrix, Psy, type = 'pcor'), 4)
#> [,1] [,2] [,3] [,4] [,5] [,6]
#> [1,] 0.0174 0.0009 0.0046 0.0005 -0.0073 0.0018
#> [2,] 0.0009 0.0029 0.0030 -0.0006 0.0004 -0.0016
#> [3,] 0.0046 0.0030 0.0078 -0.0013 -0.0002 -0.0022
#> [4,] 0.0005 -0.0006 -0.0013 0.0016 -0.0003 0.0012
#> [5,] -0.0073 0.0004 -0.0002 -0.0003 0.0058 -0.0027
#> [6,] 0.0018 -0.0016 -0.0022 0.0012 -0.0027 0.0035
Here we work with the partial correlations from each study and then synthesize that information.
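For each study \(i\), the code below extracts a single partial correlation \(p_i\) from the study's partial correlation matrix and pairs it with an approximate sampling variance
\[
v_i \;=\; \frac{\left(1-p_i^{2}\right)^{2}}{n_i - 3 - 1},
\]
which parallels the \((1-r^{2})^{2}/n\) form used for simple correlations, with the denominator reduced for the partialled-out variables; the pairs \((p_i, v_i)\) are then combined with a univariate random-effects model via rma.uni().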
#---------------------------------------------------------------------
# Create a data set with 8 complete studies
#---------------------------------------------------------------------
R <- becker09_list
R$"6" <- NULL
R$"17" <- NULL
n <- becker09$N[c(-3, -5)]
#------------------------------------------------------------------
# compute the partial correlation matrix for each study
RR <- R # redefine list of study correlation matrices
PR <- lapply(RR, cor2pcor) # study-specific partial correlation matrices
pr <- unlist(lapply(PR, '[[', 4)) # extract the 4th (column-major) element of each matrix
var_pr <- (1 - pr^2)^2 / (n - 3 - 1) # approximate sampling variances
#> Warning in (1 - pr^2)^2/(n - 3 - 1): longer object length is not a multiple of
#> shorter object length
rma.uni(pr, var_pr)
#>
#> Random-Effects Model (k = 8; tau^2 estimator: REML)
#>
#> tau^2 (estimated amount of total heterogeneity): 0.0661 (SE = 0.0426)
#> tau (square root of estimated tau^2 value): 0.2571
#> I^2 (total heterogeneity / total variability): 89.26%
#> H^2 (total variability / sampling variability): 9.31
#>
#> Test for Heterogeneity:
#> Q(df = 7) = 64.1834, p-val < .0001
#>
#> Model Results:
#>
#> estimate se zval pval ci.lb ci.ub
#> 0.2889 0.1001 2.8872 0.0039 0.0928 0.4850 **
#>
#> ---
#> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1