---
title: "R Notebook"
output: html_notebook
---
Determine the arithmetic mean, the median, the variance, and the standard deviation of a set of 34 measurements of the decrease in alkaline reserve in first-time mothers.
```{r echo=FALSE, message=FALSE, warning=FALSE}
xpr = c(38.4, 41.2, 39.7, 44.1, 32.4, 41.2, 52.1, 38.1, 40.5, 40.9, 43.9, 42.9, 41.5, 44.8, 38.6, 40.7, 39.6, 38.8, 38.8, 42.5, 38.5, 39.7, 38.8, 33.6, 43.8, 48.7, 39.0, 36.7, 40.2, 38.5, 27.8, 44.6, 36.7, 39.9)
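# Basic descriptive statistics asked for in the assignment (a minimal base-R
# summary; assumption checks, robust and interval estimates follow below)
mean(xpr)    # arithmetic mean
median(xpr)  # median
var(xpr)     # sample variance
sd(xpr)      # sample standard deviation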
### Checking the assumptions
## Independence
library(s20x)
trendscatter(c(1:length(xpr)), y = xpr, f = 0.5, xlab = "Index", ylab = "Zmena alk. rezervy", main = "")
library(DescTools)
VonNeumannTest(xpr, alternative = "two.sided", unbiased = TRUE)
# alternative = c("two.sided", "less", "greater")
library(DescTools)
BartelsRankTest(xpr, alternative = "two.sided", method = "normal")
# alternative = c("two.sided", "trend", "oscillation")
# method = c("normal", "beta", "auto")
library(litteR)
mk = mann_kendall(xpr, type = "both")
# type = c("both", "increasing", "decreasing")
p_value(mk)
library(lawstat)
runs.test(xpr,plot.it = TRUE,alternative = "two.sided")
# alternative = c("two.sided", "positive.correlated", "negative.correlated")
## Normality
# Cullen Frey plot
library(fitdistrplus)
descdist(xpr, discrete=FALSE)
descdist(xpr, discrete=FALSE, boot=999)
library(PASWR2)
eda(xpr, trim = 0, dec = 3)
library(s20x)
normcheck(xpr)
library(UsingR)
simple.eda(xpr)
library(StatDA)
edaplot(xpr,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="Zmena alk. rezervy", P.ylab="Density",B.pch=3,B.cex=0.5)
# Shapiro-Francia test
library(nortest)
sf.test(xpr)
# Shapiro-Wilk test
shapiro.test(xpr)
# Anderson-Darling test
library(nortest)
ad.test(xpr)
# Cramer-von Mises test
cvm.test(xpr)
# Jarque Bera test
library(tseries)
jarque.bera.test(xpr)
library(lawstat)
rjb.test(xpr, option="JB",crit.values="empirical", N = 5000) # klasicky J. B. test
rjb.test(xpr, option="RJB",crit.values="empirical", N = 5000) # robustni J. B. test
# crit.values = c("chisq.approximation", "empirical")
# Lilliefors test (modif. Kolmogorov-Smirnov)
library(nortest)
lillie.test(xpr)
# sj test
library(lawstat)
sj.test(xpr, crit.values = "empirical", N = 1000)
# crit.values = c("t.approximation", "empirical")
## outliers
boxplot.stats(xpr, coef = 1.5, do.conf = TRUE, do.out = TRUE)
library(univOutl)
boxB(xpr, k=1.5, method="resistant", weights=NULL, id=NULL, exclude=NA, logt=FALSE)
boxB(xpr, k=1.5, method='asymmetric', weights=NULL, id=NULL,exclude=NA, logt=FALSE)$outliers
boxB(xpr, k=1.5, method='adjbox', weights=NULL, id=NULL, exclude=NA, logt=FALSE)$outliers
# D'Agostino skewness test
library(moments)
agostino.test(xpr, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
library(moments)
moments::skewness(xpr)
library(e1071)
e1071::skewness(xpr, na.rm = FALSE, type = 3)
library(confintr)
ci_skewness(xpr,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999,seed = NULL)
# boot_type = c("bca", "perc", "norm", "basic")
# Anscombe-Glynn kurtosis test
library(moments)
anscombe.test(xpr, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
# Geary kurtosis test
library(fmsb)
geary.test(xpr)
library(moments)
kurtosis(xpr) # kurtosis
library(confintr)
ci_kurtosis(xpr,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999, seed = NULL)
# boot_type = c("bca", "perc", "norm", "basic")
# Box-Cox
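# The Box-Cox transform is y(lambda) = (x^lambda - 1)/lambda for lambda != 0
# and y(0) = log(x); lambda is chosen so that the transformed data are as close
# to normal as possible (here by profiling the log-likelihood over lambda).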
library(car)
boxCox(lm(xpr~1), lambda = seq(-2, 2, 1/10), plotit = TRUE, interp = TRUE, eps = 1/50, xlab=NULL, ylab=NULL, family="bcPower", param= "lambda", gamma=NULL,grid=TRUE)
library(forecast)
lambda = BoxCox.lambda(xpr, method = "loglik", lower = -2, upper = 2)
# method = c("guerrero", "loglik")
xbc = BoxCox(xpr, lambda)
xrt = InvBoxCox(xbc, lambda, biasadj = FALSE, fvar = NULL)
par(mfrow=c(1,2))
hist(xpr, main="")
hist(xbc, main="",xlab="transf x")
par(mfrow=c(1,1))
er <- qt(0.975,df=length(xbc)-1)*sd(xbc)/sqrt(length(xbc))
left <- mean(xbc)-er; left
right <- mean(xbc)+er; right
InvBoxCox(c(mean(xbc), left, right), lambda, biasadj = FALSE, fvar = NULL)
### Moment estimates
mean(xpr)
library(confintr)
ci_mean(xpr, probs = c(0.025, 0.975),type = "t")
ci_mean(xpr, probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999, seed = NULL)
# boot_type = c("stud" (default), "bca", "perc", "norm", "basic")
library(MKinfer)
meanCI(xpr, conf.level = 0.95, boot = FALSE, na.rm = TRUE, alternative = "two.sided")
meanCI(xpr, conf.level = 0.95, boot = TRUE, R = 9999, bootci.type = "all",na.rm = TRUE, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
sd(xpr)
library(confintr)
ci_sd(xpr,probs = c(0.025, 0.975),type = "chi-squared")
ci_sd(xpr,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca",R = 9999, seed = NULL)
# type = c("chi-squared", "bootstrap"),
# boot_type = c("bca", "perc", "stud", "norm", "basic")
library(MKinfer)
sdCI(xpr, conf.level = 0.95, boot = FALSE,na.rm = TRUE, alternative = "two.sided")
sdCI(xpr, conf.level = 0.95, boot = TRUE, R = 9999, bootci.type = "all",na.rm = TRUE, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
mean(xpr,trim=0.1)
mean(xpr,trim=0.07)
library(asbio)
mean(trim.me(xpr, trim = 0.07))
sd(trim.me(xpr, trim = 0.07))
library(confintr)
ci_mean(trim.me(xpr, trim = 0.07), probs = c(0.025, 0.975),type = "t")
ci_mean(trim.me(xpr, trim = 0.07), probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999, seed = NULL)
# boot_type = c("stud" (default), "bca", "perc", "norm", "basic")
length(xpr)
library(asbio)
length(trim.me(xpr, trim = 0.07))
length(trim.me(xpr, trim = 0.1))
length(trim.me(xpr, trim = 0.2))
library(asbio)
win(xpr,lambda = 0.07)
library(WRS2)
winmean(xpr, tr = 0.1, na.rm = FALSE)
winmean(xpr, tr = 0.07, na.rm = FALSE)
sqrt(winvar(xpr, tr = 0.07, na.rm = FALSE, STAND = NULL))
library(confintr)
ci_mean(win(xpr,lambda = 0.07), probs = c(0.025, 0.975),type = "t")
ci_mean(win(xpr,lambda = 0.07), probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999, seed = NULL)
# boot_type = c("stud" (default), "bca", "perc", "norm", "basic")
### Robust M-estimates (Huber)
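# Huber's M-estimate of location down-weights observations lying further than
# k scale units (MAD-based scale) from the current centre; k = 1.5 is the
# conventional tuning constant, trading some efficiency for robustness.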
library(MASS)
hub = huber(xpr, k = 1.5, tol = 1e-06)
hub$mu
hub$s
### Quantile estimates
median(xpr)
library(confintr)
ci_median(xpr,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999,seed = NULL)
# type = c("binomial", "bootstrap")
# boot_type = c("bca", "perc", "norm", "basic")
library(MKinfer)
medianCI(xpr, conf.level = 0.95, method = "exact",R = 9999, bootci.type = "bca",minLength = FALSE, na.rm = FALSE,alternative = "two.sided")
# method = c("exact", "asymptotic")
# bootci.type = c("norm", "basic", "perc", "bca"),
# alternative = c("two.sided", "less", "greater")
mad(xpr, center = median(xpr), constant = 1.4826, na.rm = FALSE)
library(rQCC)
mad.unbiased(xpr, center = median(xpr), constant=1.4826, na.rm = FALSE)
mad2.unbiased(xpr, center = median(xpr), constant=1.4826, na.rm = FALSE)
library(confintr)
ci_mad(xpr,probs = c(0.025, 0.975),constant = 1.4826,type = "bootstrap", boot_type = "bca",R = 9999, seed = NULL)
# boot_type = c("bca", "perc", "norm", "basic")
library(MKinfer)
madCI(xpr, conf.level = 0.95, method = "exact", minLength = FALSE,R = 9999, bootci.type = "bca",na.rm = FALSE, constant = 1.4826,alternative = "two.sided")
# method = c("exact", "asymptotic")
# bootci.type = c("norm", "basic", "perc", "bca"),
# alternative = c("two.sided", "less", "greater")
### Maximum likelihood estimates
library(MASS)
fitdistr(xpr, densfun="normal")
fitdistr(xpr, densfun="t")
fitdistr(xpr, densfun="cauchy")
library(broom)
bn = broom::glance(MASS::fitdistr(xpr,"normal"))
bt = broom::glance(MASS::fitdistr(xpr,"t"))
bc = broom::glance(MASS::fitdistr(xpr,"cauchy"))
rbind(bn,bt,bc)
library(fitdistrplus)
fitdist(xpr,distr="norm", method = "mle")
# method = c("mle", "mme", "qme", "mge", "mse")
```
Determine the mean of 24 determinations of copper (in ppm) in wholemeal flour.
```{r}
library(MASS)
data(chem)
xch = chem
# Cullen Frey plot
library(fitdistrplus)
descdist(xch, discrete=FALSE)
descdist(xch, discrete=FALSE, boot=999)
library(PASWR2)
eda(xch, trim = 0.05, dec = 3)
library(s20x)
normcheck(xch)
library(UsingR)
simple.eda(xch)
library(StatDA)
edaplot(xch,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="Cu [ppm]",P.ylab="Density",B.pch=3,B.cex=0.5)
boxplot.stats(xch, coef = 1.5, do.conf = TRUE, do.out = TRUE)
library(univOutl)
boxB(xch, k=1.5, method="resistant", weights=NULL, id=NULL, exclude=NA, logt=FALSE)
boxB(xch, k=1.5, method='asymmetric', weights=NULL, id=NULL,exclude=NA, logt=FALSE)$outliers
boxB(xch, k=1.5, method='adjbox', weights=NULL, id=NULL, exclude=NA, logt=FALSE)$outliers
library(EnvStats)
rosnerTest(xch, k = 2, alpha = 0.05, warn = TRUE)
library(outliers)
grubbs.test(xch, type=20, opposite=FALSE, two.sided=TRUE)
library(extremevalues)
getOutliers(xch,method="I",distribution="normal")
getOutliers(xch,method="II",distribution="normal")
L <- getOutliers(xch,method="II",distribution="normal")
outlierPlot(xch,L,mode="qq")
outlierPlot(xch,L,mode="residual")
library(OutlierDetection)
UnivariateOutlierDetection(xch, k = 0.05 * length(xch), cutoff = 0.95, dist = FALSE, dens = FALSE, depth = FALSE, Method = "euclidean", rnames = FALSE)
mean(xch)
median(xch)
sd(xch, na.rm=TRUE)
mean(xch,trim=0.1)
mean(xch,trim=0.05)
library(asbio)
mean(trim.me(xch, trim = 0.05))
sd(trim.me(xch, trim = 0.05))
length(xch)
library(asbio)
length(trim.me(xch, trim = 0.05))
length(trim.me(xch, trim = 0.1))
library(WRS2)
winmean(xch, tr = 0.1, na.rm = FALSE)
winmean(xch, tr = 0.05, na.rm = FALSE)
sqrt(winvar(xch, tr = 0.05, na.rm = FALSE, STAND = NULL))
### Robust M-estimates (Huber)
library(MASS)
hub = huber(xch, k = 1.5, tol = 1e-06)
hub$mu
hub$s
```
The mean concentration from 8 determinations of Na+ ions in a solution was 0.547 mol/l, with a standard deviation of 0.003 mol/l. Determine the confidence intervals for the mean, the standard deviation, and the variance at the significance level 0.05.
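For reference, with sample mean $\bar x$, sample standard deviation $s$ and $n$ observations, the two-sided $100(1-\alpha)\,\%$ intervals evaluated in the chunk below are

$$
\bar x \pm t_{1-\alpha/2,\,n-1}\,\frac{s}{\sqrt{n}},
\qquad
\frac{(n-1)\,s^2}{\chi^2_{1-\alpha/2,\,n-1}} \;\le\; \sigma^2 \;\le\; \frac{(n-1)\,s^2}{\chi^2_{\alpha/2,\,n-1}},
$$

and the interval for $\sigma$ is obtained by taking square roots of the variance bounds.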
```{r}
m = 0.547
s = 0.003
s2 = s^2; s2
n = 8
m + c(-s,s)*qt(0.975,df=n-1)/sqrt(n)
library(LearningStats)
Mean.CI(m, sigma = NULL, sc = NULL, s = s, n = n, conf.level= 0.95)
s*sqrt((n-1)/qchisq(c(0.975,0.025),df=n-1))
library(LearningStats)
variance.CI(x = NULL, s = s, sc = NULL, smu = NULL, mu = NULL, n = n, conf.level= 0.95)
```
The tin content (in %) was measured in 100 ore samples and the results were recorded as a frequency table. Determine the mean and the variance.
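For grouped data with class midpoints $x_i$ and frequencies $f_i$ ($N=\sum_i f_i$), the weighted mean and the maximum-likelihood weighted variance are

$$
\bar x_w = \frac{1}{N}\sum_i f_i\,x_i,
\qquad
s_w^2 = \frac{1}{N}\sum_i f_i\,(x_i-\bar x_w)^2 ,
$$

which is what `weighted.mean()` and the ML-type weighted variance evaluate below with the proportion weights `wt = fx/N`.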
```{r}
xx = c(30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80)
fx = c(4, 6, 8, 15, 25, 20, 8, 7, 5, 2)
N = sum(fx)
wt <- fx/N
xx1 = xx[-1]
xx2 = xx[-length(xx)]
xxm = (xx2+xx1)/2
barplot(wt, names.arg = xxm,xlab = "Sn (%)")
density=wt/diff(xx)
plot(xxm,density,type = "h",lwd=8,col=4,xlab = "Sn (%)")
abline(h=0)
xs = seq(xxm[1],xxm[length(xxm)],0.01)
dxs = dnorm(xs,mean=53,sd=10)
points(xs,dxs,type="l",col = "red", lwd = 2)
wm = sum(xxm*fx)/N; wm
weighted.mean(xxm, wt, na.rm = TRUE)
library(Hmisc)
wtd.mean(xxm, weights=wt, normwt=FALSE, na.rm=TRUE)
library(modi)
ws2 = weighted.var(xxm, wt, na.rm = TRUE); ws2
ws = sqrt(ws2); ws
library(Hmisc)
wtd.var(xxm, weights=wt, normwt=FALSE, na.rm=TRUE,method='ML')
# method=c('unbiased', 'ML')
sqrt(wtd.var(xxm, weights=wt, normwt=FALSE, na.rm=TRUE,method='ML'))
wm + c(-ws,ws)*qt(0.975,df=N-1)/sqrt(N)
library(LearningStats)
Mean.CI(wm, sigma = NULL, sc = NULL, s = ws, n = N, conf.level= 0.95)
ws*sqrt((N-1)/qchisq(c(0.975,0.025),df=N-1))
library(LearningStats)
variance.CI(x = NULL, s = ws, sc = NULL, smu = NULL, mu = NULL, n = N, conf.level= 0.95)
library(sn)
fitdistr.grouped(breaks=xx, counts=fx, family="normal", trace = FALSE, wpar = NULL)
```
The elements in the ceramic body and in the glaze of ceramics from the Longquan and Jingdezhen sites were determined by EDXRF. A total of forty typical sherds were selected for studying the raw materials and the firing technology
(https://archive.ics.uci.edu/ml/datasets/Chemical+Composition+of+Ceramic+Samples)
```{r}
data <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00583/Chemical%20Composion%20of%20Ceramic.csv")
head(data)
ss <- function(X){substr(X, start=1, stop=2)}
snam <- ss(data[,1])
snam
MM = as.data.frame(cbind(snam,data[,"Part"],data["MnO"]))
colnames(MM) = c("Site","Part","MnO")
mno = MM[,"MnO"]
part = as.factor(MM[,"Part"])
site = as.factor(MM[,"Site"])
aggregate(mno,list(site,part), FUN=mean)
aggregate(mno,list(site,part), FUN=median)
aggregate(mno,list(site,part), FUN=length)
aggregate(mno,list(site,part), FUN=function(x){sd(x)/mean(x)})
### Distribution of MnO in the ceramics from one of the sites
mm = MM[which(MM$Site=="DY"),]
mno = mm[,"MnO"]
## Normality
# Cullen Frey plot
library(fitdistrplus)
descdist(mno, discrete=FALSE)
descdist(mno, discrete=FALSE, boot=999)
descdist(log(mno), discrete=FALSE, boot=999)
library(PASWR2)
eda(mno, trim = 0, dec = 3)
eda(log(mno), trim = 0, dec = 3)
library(s20x)
normcheck(mno)
normcheck(log(mno))
library(UsingR)
simple.eda(mno)
simple.eda(log(mno))
library(StatDA)
edaplot(mno,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="MnO", P.ylab="Density",B.pch=3,B.cex=0.5)
edaplot(mno,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=TRUE,P.main="",P.xlab="MnO", P.ylab="Density",B.pch=3,B.cex=0.5)
library(cwhmisc) # T3 plot (Ghosh)
T3plot(mno,lab=paste("T3 plot of ",deparse(substitute(x))), legend.pos="bottom", cex=0.6)
T3plot(log(mno),lab=paste("T3 plot of ",deparse(substitute(x))), legend.pos="bottom", cex=0.6)
# Box-Cox
library(car)
boxCox(lm(mno~1), lambda = seq(-2, 2, 1/10), plotit = TRUE, interp = TRUE, eps = 1/50, xlab=NULL, ylab=NULL, family="bcPower", param= "lambda", gamma=NULL,grid=TRUE)
library(forecast)
lambda = BoxCox.lambda(mno, method = "loglik", lower = -2, upper = 2)
# method = c("guerrero", "loglik")
xbc = BoxCox(mno, lambda)
xrt = InvBoxCox(xbc, lambda, biasadj = FALSE, fvar = NULL)
par(mfrow=c(1,2))
hist(mno, main="")
hist(xbc, main="",xlab="transf x")
par(mfrow=c(1,1))
er <- qt(0.975,df=length(xbc)-1)*sd(xbc)/sqrt(length(xbc))
left <- mean(xbc)-er; left
right <- mean(xbc)+er; right
InvBoxCox(c(mean(xbc), left, right), lambda, biasadj = FALSE, fvar = NULL)
## Bhattacharya plot with ASH
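# Bhattacharya's method: for a single normal component, the differences of the
# log-density, log(f(x[i+1])/f(x[i])), plotted against the bin midpoints fall
# approximately on a straight line with negative slope; several separate linear
# segments suggest a mixture. The density f is estimated here by an average
# shifted histogram (ASH).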
library(ash)
nn=30
f <- ash1(bin1(mno,nbin=nn),5) # ash estimate
nk<-length(f$y)-1
yk<-rep(0,nk)
xk<-rep(0,nk)
for(i in 1:nk){
  yk[i] <- log(f$y[i+1]/f$y[i])
  xk[i] <- (f$x[i+1]+f$x[i])/2
}
plot(xk,yk, type="l",col=1,lty=1, main="", xlab="x",ylab="ln( f(i+1) / f(i) )")
rug(mno)
# Hartigan dip test
library(diptest)
dtb = dip(mno, full.result = TRUE, min.is.0 = TRUE, debug = FALSE); dtb
dip.test(mno, simulate.p.value = FALSE, B = 2000)
plot(dtb)
# Silverman test
library(silvermantest)
# https://www.mathematik.uni-marburg.de/~stochastik/R_packages/
silverman.test(mno, k=1, M = 999, adjust = FALSE, digits = 6)
silverman.test(mno, k=2, M = 999, adjust = FALSE, digits = 6)
silverman.test(mno, k=3, M = 999, adjust = FALSE, digits = 6)
silverman.plot(mno, kmin=1, kmax=3, alpha=0.05, adjust=FALSE)
# Ameijeiras-Alonso excess mass test
library(multimode)
modetest(mno,mod0=1,method="ACR",B=500,lowsup=-Inf,uppsup=Inf,submethod=NULL,n=NULL,tol=NULL,tol2=NULL,gridsize=NULL,alpha=NULL,nMC=NULL,BMC=NULL)
# maximum likelihood method (normal mixture, mclust)
library(mclust)
xs<- sort(mno)
dmc <- Mclust(xs,G=NULL)
dmcBIC <- mclustBIC(xs); dmcBIC
nn = 2
dmc <- Mclust(xs,G=nn)
dmcz <-round(dmc$z,nn); as.data.frame(cbind(mm[,2],dmcz))
```
The fat content (in %) was determined in milk from 16 ewes. Determine a suitable measure of central tendency for the milk fat content.
```{r}
xmo = c(4.08, 3.91, 4.02, 4.26, 4.03, 3.94, 3.82, 3.72, 4.03, 3.87, 4.09, 3.92, 4.18, 3.93, 3.81, 3.71)
### Checking the assumptions
## Independence
library(s20x)
trendscatter(c(1:length(xmo)), y = xmo, f = 0.5, xlab = "Index", ylab = "Obsah tuku (%)", main = "")
library(DescTools)
VonNeumannTest(xmo, alternative = "two.sided", unbiased = TRUE)
# alternative = c("two.sided", "less", "greater")
library(DescTools)
BartelsRankTest(xmo, alternative = "two.sided", method = "normal")
# alternative = c("two.sided", "trend", "oscillation")
# method = c("normal", "beta", "auto")
library(litteR)
mk = mann_kendall(xmo, type = "both")
# type = c("both", "increasing", "decreasing")
p_value(mk)
## Normality
# Cullen Frey plot
library(fitdistrplus)
descdist(xmo, discrete=FALSE)
descdist(xmo, discrete=FALSE, boot=999)
library(PASWR2)
eda(xmo, trim = 0, dec = 3)
library(s20x)
normcheck(xmo)
library(UsingR)
simple.eda(xmo)
library(StatDA)
edaplot(xmo,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="Obsah tuku (%)", P.ylab="Density",B.pch=3,B.cex=0.5)
library(cwhmisc) # T3 plot (Ghosh)
T3plot(xmo,lab=paste("T3 plot of ",deparse(substitute(x))), legend.pos="bottom", cex=0.6)
library(moments)
moments::skewness(xmo)
library(e1071)
e1071::skewness(xmo, na.rm = FALSE, type = 3)
library(confintr)
ci_skewness(xmo,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999,seed = NULL)
# boot_type = c("bca", "perc", "norm", "basic")
# D'Agostino skewness test
library(moments)
agostino.test(xmo, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
library(moments)
kurtosis(xmo) # kurtosis
library(confintr)
ci_kurtosis(xmo,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999, seed = NULL)
# boot_type = c("bca", "perc", "norm", "basic")
# Anscombe-Glynn kurtosis test
library(moments)
anscombe.test(xmo, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
# Geary kurtosis test
library(fmsb)
geary.test(xmo)
# Shapiro-Francia test
library(nortest)
sf.test(xmo)
# Shapiro-Wilk test
shapiro.test(xmo)
# Anderson-Darling test
library(nortest)
ad.test(xmo)
# Cramer-von Mises test
cvm.test(xmo)
# Jarque Bera test
library(tseries)
jarque.bera.test(xmo)
library(lawstat)
rjb.test(xmo, option="JB",crit.values="empirical", N = 5000) # klasicky J. B. test
rjb.test(xmo, option="RJB",crit.values="empirical", N = 5000) # robustni J. B. test
# crit.values = c("chisq.approximation", "empirical")
# Lilliefors test (modif. Kolmogorov-Smirnov)
library(nortest)
lillie.test(xmo)
# sj test
library(lawstat)
sj.test(xmo, crit.values = "empirical", N = 1000)
# crit.values = c("t.approximation", "empirical")
## outliers
boxplot.stats(xmo, coef = 1.5, do.conf = TRUE, do.out = TRUE)
## Parameters
mean(xmo)
library(confintr)
ci_mean(xmo, probs = c(0.025, 0.975),type = "t")
ci_mean(xmo, probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999, seed = NULL)
# boot_type = c("stud" (default), "bca", "perc", "norm", "basic")
library(MKinfer)
meanCI(xmo, conf.level = 0.95, boot = FALSE, na.rm = TRUE, alternative = "two.sided")
meanCI(xmo, conf.level = 0.95, boot = TRUE, R = 9999, bootci.type = "all",na.rm = TRUE, alternative = "two.sided")
# alternative = c("two.sided", "less", "greater")
```
Ba content (ppm) in bones from the Dolní Věstonice - Písky site.
```{r}
xba = c(102.48078, 135.75333, 130.46889, 138.71764, 104.37857, 73.57628,48.91491,67.98234, 101.35093, 60.16596, 90.75023, 47.34744, 49.33503, 76.46496, 175.35793, 130.49675, 148.01476, 114.79635, 65.91149, 83.92482, 71.02072, 34.66255, 96.01142, 78.07690, 54.59542, 158.29052, 119.87277, 69.14092, 78.84901, 41.29317, 72.81092, 554.77243, 259.68719, 149.31984, 107.85497,
82.09231, 26.01403, 98.94641, 56.41384, 86.10823, 118.56925, 94.48836,
65.17541)
# Cullen Frey plot
library(fitdistrplus)
descdist(xba, discrete=FALSE)
descdist(xba, discrete=FALSE, boot=999)
descdist(log(xba), discrete=FALSE, boot=999)
library(PASWR2)
eda(xba, trim = 0.05, dec = 3)
eda(log(xba), trim = 0.05, dec = 3)
library(s20x)
normcheck(xba)
normcheck(log(xba))
library(UsingR)
simple.eda(xba)
simple.eda(log(xba))
library(StatDA)
edaplot(xba,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="Ba [ppm]",P.ylab="Density",B.pch=3,B.cex=0.5)
library(StatDA)
edaplot(xba,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=TRUE,P.main="",P.xlab="Ba [ppm]",P.ylab="Density",B.pch=3,B.cex=0.5)
boxplot.stats(xba, coef = 1.5, do.conf = TRUE, do.out = TRUE)
library(univOutl)
boxB(xba, k=1.5, method="resistant", weights=NULL, id=NULL, exclude=NA, logt=FALSE)
boxB(xba, k=1.5, method='asymmetric', weights=NULL, id=NULL,exclude=NA, logt=FALSE)$outliers
boxB(xba, k=1.5, method='adjbox', weights=NULL, id=NULL, exclude=NA, logt=FALSE)$outliers
library(extremevalues)
#getOutliers(xba,method="I",distribution="normal")
#getOutliers(xba,method="II",distribution="normal")
getOutliers(xba,method="I",distribution="lognormal")
getOutliers(xba,method="II",distribution="lognormal")
L <- getOutliers(xba,method="II",distribution="lognormal")
outlierPlot(xba,L,mode="qq")
outlierPlot(xba,L,mode="residual")
library(OutlierDetection)
UnivariateOutlierDetection(xba, k = 0.05 * length(xba), cutoff = 0.95, dist = FALSE, dens = FALSE, depth = FALSE, Method = "euclidean", rnames = FALSE)
mno = xba[-32] # Ba values with the extreme observation no. 32 (554.8 ppm) removed
# mno = xba[-c(32,33)]
# Box-Cox
library(car)
boxCox(lm(mno~1), lambda = seq(-2, 2, 1/10), plotit = TRUE, interp = TRUE, eps = 1/50, xlab=NULL, ylab=NULL, family="bcPower", param= "lambda", gamma=NULL,grid=TRUE)
library(forecast)
lambda = BoxCox.lambda(mno, method = "loglik", lower = -2, upper = 2)
# method = c("guerrero", "loglik")
xbc = BoxCox(mno, lambda)
xrt = InvBoxCox(xbc, lambda, biasadj = FALSE, fvar = NULL)
par(mfrow=c(1,2))
hist(mno, main="")
hist(xbc, main="",xlab="transf x")
par(mfrow=c(1,1))
er <- qt(0.975,df=length(xbc)-1)*sd(xbc)/sqrt(length(xbc))
left <- mean(xbc)-er
right <- mean(xbc)+er
InvBoxCox(c(mean(xbc), left, right), lambda, biasadj = FALSE, fvar = NULL)
mean(xba)
median(xba)
sd(xba, na.rm=TRUE)
mean(mno)
median(mno)
sd(mno, na.rm=TRUE)
library(FSA)
geomean(mno, na.rm = FALSE, zneg.rm = FALSE)
library(DescTools)
Gmean(mno, method = "boot", conf.level = 0.05,sides = "two.sided", na.rm = FALSE)
# method = c("classic", "boot")
# sides = c("two.sided","left","right")
library(fuel)
nmn = length(mno)
lognormalmean(mno, estimator="ml", base = exp(1), n = nmn, m = nmn - 1, d = 1/nmn)
library(FSA)
geosd(mno, na.rm = FALSE, zneg.rm = FALSE) # geometric SD, i.e. exp(sd(log(mno)))
library(fuel)
lognormalsd(mno, estimator="ml", base = exp(1), n = nmn, m = nmn - 1, d = 1/nmn)
library(DescTools)
Gsd(mno, na.rm = FALSE)
### Quantile estimates
median(xba)
library(confintr)
ci_median(xba,probs = c(0.025, 0.975),type = "bootstrap", boot_type = "bca", R = 9999,seed = NULL)
# type = c("binomial", "bootstrap")
# boot_type = c("bca", "perc", "norm", "basic")
library(MKinfer)
medianCI(xba, conf.level = 0.95, method = "exact",R = 9999, bootci.type = "bca",minLength = FALSE, na.rm = FALSE,alternative = "two.sided")
# method = c("exact", "asymptotic")
# bootci.type = c("norm", "basic", "perc", "bca"),
# alternative = c("two.sided", "less", "greater")
mad(xba, center = median(xba), constant = 1.4826, na.rm = FALSE)
library(rQCC)
mad.unbiased (xba, center = median(xba), constant=1.4826, na.rm = FALSE)
mad2.unbiased(xba, center = median(xba), constant=1.4826, na.rm = FALSE)
library(fitdistrplus)
flm = fitdist(mno,distr="lnorm", method = "mle")
# method = c("mle", "mme", "qme", "mge", "mse")
summary(flm)
fga = fitdist(mno,distr="gamma", method = "mle")
# method = c("mle", "mme", "qme", "mge", "mse")
summary(fga)
```
When determining the carbon content of coal, a mean content of 83.050 % and a standard deviation of 3.456 % were calculated from 20 measurements (assuming a normal distribution). Determine the 95% confidence intervals for both parameters.
```{r echo=FALSE, message=FALSE, warning=FALSE}
m = 83.050
s = 3.456
n = 20
m + c(-s, s)*qt(0.975,df=n-1)/sqrt(n)
library(LearningStats)
Mean.CI(m, sigma = NULL, sc = NULL, s = s, n = n, conf.level= 0.95)$CI
s*sqrt((n-1)/qchisq(c(0.975, 0.025), df=n-1))
library(LearningStats)
variance.CI(x = NULL, s = s, sc = NULL, smu = NULL, mu = NULL, n = n, conf.level= 0.95)
```
Activity of an enzyme involved in the metabolism of carcinogenic substances. The values are the urinary metabolic ratio of 5-acetylamino-6-formylamino-3-methyluracil to 1-methylxanthine (AFMU/1X) after oral administration of caffeine.
```{r}
library(multimode)
data(enzyme)
xea = enzyme
## Normalita
# Cullen Frey plot
library(fitdistrplus)
descdist(xea, discrete=FALSE)
descdist(xea, discrete=FALSE, boot=999)
descdist(log(xea), discrete=FALSE, boot=999)
library(PASWR2)
eda(xea, trim = 0, dec = 3)
eda(log(xea), trim = 0, dec = 3)
library(s20x)
normcheck(xea)
normcheck(log(xea))
library(UsingR)
simple.eda(xea)
simple.eda(log(xea))
library(StatDA)
edaplot(xea,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="Aktivita enzymu", P.ylab="Density",B.pch=3,B.cex=0.5)
edaplot(xea,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=TRUE,P.main="",P.xlab="Aktivita enzymu", P.ylab="Density",B.pch=3,B.cex=0.5)
# Box-Cox
library(car)
boxCox(lm(xea~1), lambda = seq(-2, 2, 1/10), plotit = TRUE, interp = TRUE, eps = 1/50, xlab=NULL, ylab=NULL, family="bcPower", param= "lambda", gamma=NULL,grid=TRUE)
library(forecast)
lambda = BoxCox.lambda(xea, method = "loglik", lower = -2, upper = 2)
# method = c("guerrero", "loglik")
xbc = BoxCox(xea, lambda)
xrt = InvBoxCox(xbc, lambda, biasadj = FALSE, fvar = NULL)
par(mfrow=c(1,2))
hist(xea, main="")
hist(xbc, main="",xlab="transf x")
par(mfrow=c(1,1))
er <- qt(0.975,df=length(xbc)-1)*sd(xbc)/sqrt(length(xbc))
left <- mean(xbc)-er; left
right <- mean(xbc)+er; right
InvBoxCox(c(mean(xbc), left, right), lambda, biasadj = FALSE, fvar = NULL)
## Bhattacharya plot with ASH
dat = xea
library(ash)
nn=30
f <- ash1(bin1(dat,nbin=nn),5) # ash estimate
nk<-length(f$y)-1
yk<-rep(0,nk)
xk<-rep(0,nk)
for(i in 1:nk){
  yk[i] <- log(f$y[i+1]/f$y[i])
  xk[i] <- (f$x[i+1]+f$x[i])/2
}
plot(xk,yk, type="l",col=1,lty=1, main="", xlab="x",ylab="ln( f(i+1) / f(i) )")
rug(dat)
# Hartigan dip test
library(diptest)
dtb = dip(xea, full.result = TRUE, min.is.0 = TRUE, debug = FALSE); dtb
dip.test(xea, simulate.p.value = FALSE, B = 2000)
plot(dtb)
# Silverman test
library(silvermantest)
# https://www.mathematik.uni-marburg.de/~stochastik/R_packages/
silverman.test(xea, k=1, M = 999, adjust = FALSE, digits = 6)
silverman.test(xea, k=2, M = 999, adjust = FALSE, digits = 6)
silverman.test(xea, k=3, M = 999, adjust = FALSE, digits = 6)
silverman.test(xea, k=4, M = 999, adjust = FALSE, digits = 6)
silverman.plot(xea, kmin=1, kmax=3, alpha=0.05, adjust=FALSE)
# Ameijeiras-Alonso excess mass test
library(multimode)
modetest(xea,mod0=1,method="ACR",B=500,lowsup=-Inf,uppsup=Inf,submethod=NULL,n=NULL,tol=NULL,tol2=NULL,gridsize=NULL,alpha=NULL,nMC=NULL,BMC=NULL)
# maximum likelihood method (normal mixture, mclust)
library(mclust)
xs <- sort(xea)
dmc <- Mclust(xs,G=NULL)
dmcBIC <- mclustBIC(xs); dmcBIC
nn = 3
dmc <- Mclust(xs,G=nn)
dmcz <-round(dmc$z,nn); dmcz
```
Percentage of silicon dioxide (in %) in 22 chondrite meteorites.
```{r}
library(multimode)
data(chondrite)
xcd = chondrite
## Normality
# Cullen Frey plot
library(fitdistrplus)
descdist(xcd, discrete=FALSE)
descdist(xcd, discrete=FALSE, boot=999)
descdist(log(xcd), discrete=FALSE, boot=999)
library(PASWR2)
eda(xcd, trim = 0, dec = 3)
eda(log(xcd), trim = 0, dec = 3)
library(s20x)
normcheck(xcd)
normcheck(log(xcd))
library(UsingR)
simple.eda(xcd)
simple.eda(log(xcd))
library(StatDA)
edaplot(xcd, H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=FALSE,P.main="",P.xlab="SiO2", P.ylab="Density",B.pch=3,B.cex=0.5)
edaplot(xcd,H.freq=FALSE,box=TRUE,H.breaks=30,S.pch=3,S.cex=0.5,D.lwd=1.5,P.log=TRUE,P.main="",P.xlab="SiO2", P.ylab="Density",B.pch=3,B.cex=0.5)
# Box-Cox
library(car)
boxCox(lm(xcd~1), lambda = seq(-2, 2, 1/10), plotit = TRUE, interp = TRUE, eps = 1/50, xlab=NULL, ylab=NULL, family="bcPower", param= "lambda", gamma=NULL,grid=TRUE)
library(forecast)
lambda = BoxCox.lambda(xcd, method = "loglik", lower = -2, upper = 2)
# method = c("guerrero", "loglik")
xbc = BoxCox(xcd, lambda)
xrt = InvBoxCox(xbc, lambda, biasadj = FALSE, fvar = NULL)
par(mfrow=c(1,2))
hist(xcd, main="")
hist(xbc, main="",xlab="transf x")
par(mfrow=c(1,1))
er <- qt(0.975,df=length(xbc)-1)*sd(xbc)/sqrt(length(xbc))
left <- mean(xbc)-er; left
right <- mean(xbc)+er; right
InvBoxCox(c(mean(xbc), left, right), lambda, biasadj = FALSE, fvar = NULL)
## Bhattacharya plot s ASH
dat = xcd
library(ash)
nn=30
f <- ash1(bin1(dat,nbin=nn),5) # ash estimate
nk<-length(f$y)-1
yk<-rep(0,nk)
xk<-rep(0,nk)
for(i in 1:nk){
  yk[i] <- log(f$y[i+1]/f$y[i])
  xk[i] <- (f$x[i+1]+f$x[i])/2
}
plot(xk,yk, type="l",col=1,lty=1, main="", xlab="x",ylab="ln( f(i+1) / f(i) )")
rug(dat)
# Hartigan dip test
library(diptest)
dtb = dip(xcd, full.result = TRUE, min.is.0 = TRUE, debug = FALSE); dtb
dip.test(xcd, simulate.p.value = FALSE, B = 2000)
plot(dtb)
# Silverman test
dat = xcd
library(silvermantest)
# https://www.mathematik.uni-marburg.de/~stochastik/R_packages/
silverman.test(dat, k=1, M = 999, adjust = FALSE, digits = 6)
silverman.test(dat, k=2, M = 999, adjust = FALSE, digits = 6)
silverman.test(dat, k=3, M = 999, adjust = FALSE, digits = 6)
silverman.plot(dat, kmin=1, kmax=3, alpha=0.05, adjust=FALSE)
# Ameijeiras-Alonso excess mass test
library(multimode)
modetest(xcd,mod0=1,method="ACR",B=500,lowsup=-Inf,uppsup=Inf,submethod=NULL,n=NULL,tol=NULL,tol2=NULL,gridsize=NULL,alpha=NULL,nMC=NULL,BMC=NULL)
# maximum likelihood method (normal mixture, mclust)
library(mclust)
xs<- sort(xcd)
dmc <- Mclust(xs,G=NULL)
dmcBIC <- mclustBIC(xs); dmcBIC
nn = 2
dmc <- Mclust(xs,G=nn)
dmcz <-round(dmc$z,nn); dmcz
```
Determine the number of parallel colorimetric determinations of Fe content required so that the maximum error of the resulting mean does not exceed 0.005. The error of the method (standard deviation) is 0.0132.
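For reference, requiring the half-width of the confidence interval for the mean, $z_{1-\alpha/2}\,\sigma/\sqrt{n}$, not to exceed the maximum error $\delta$ gives

$$
n \;\ge\; \left(\frac{z_{1-\alpha/2}\,\sigma}{\delta}\right)^{2};
$$

the chunk below evaluates this with the rough factor 3 (three-sigma rule), with $z_{0.975}$, and finally iterates using a $t$ quantile in place of $z$.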
```{r}
delta = 0.005
sigma = 0.0132
np = (3*sigma/delta)^2; np
nz = ((sigma*qnorm(0.975))/delta)^2; nz
library(samplingbook)
sample.size.mean(e=delta, S=sigma, N = Inf, level = 0.95)
t0 = 3
nt = ((sigma*t0)/delta)^2; nt
# iteration: replace the initial factor 3 by the t quantile for the current estimate of n
nt = ((sigma*qt(0.975, df = nt - 1))/delta)^2; nt
```
An automatic lathe produces sealing rings with a declared diameter of 25 mm. A random sample of 30 rings had a mean of 25.43 mm and a standard deviation of 0.145 mm. At a 5% risk, can we accept that the machine keeps to the nominal value of 25 mm?
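Before delegating to `tsum.test()` below, a minimal base-R sketch of the same one-sample $t$-test computed directly from the summary statistics (values copied from the assignment):

```{r}
m <- 25.43; s <- 0.145; n <- 30; mu0 <- 25
t_stat <- (m - mu0) / (s / sqrt(n)); t_stat        # one-sample t statistic
2 * pt(-abs(t_stat), df = n - 1)                   # two-sided p-value
m + c(-1, 1) * qt(0.975, df = n - 1) * s / sqrt(n) # 95% CI for the mean
```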
```{r}
library(PASWR2)
tsum.test(mean.x = 25.43, s.x = 0.145, n.x = 30, mean.y = NULL, s.y = NULL,n.y = NULL, alternative = "two.sided", mu = 25,var.equal = FALSE,conf.level = 0.95)
# alternative = c("two.sided", "less", "greater")
```
Two different measurement methods were applied to 10 samples and the differences between the measured values were recorded. Decide whether there is a systematic difference between the two methods (at the significance level 0.05).
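The tests below work directly with the recorded differences $d_i$; a systematic difference means a non-zero mean difference, and the classical one-sample $t$ statistic for this is

$$
t = \frac{\bar d}{s_d/\sqrt{n}}, \qquad \nu = n-1 ,
$$

where $\bar d$ and $s_d$ are the mean and the standard deviation of the differences.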
```{r}
dx = c(1.2, -3.5, 0.5, 1.4, -0.8, 5.1, 6.8, 1.7, 0.9, -2.3)
library(s20x)
normcheck(dx)
# One sample t-test
t.test(dx,mu=0, alternative = "two.sided")
# Robust one-sample t-test
library(rt.test)
rt.test(dx, alternative = "two.sided",mu = 0, test.stat = "TA", conf.level = 0.95)
# alternative = c("two.sided", "less", "greater")
# test.stat = c("TA", "TB"), based on the empirical distributions of the TA statistic (based on median and MAD) and the TBstatistic (based on Hodges-Lehmann and Shamos
# Bootstrap t-Test
library(MKinfer)
boot.t.test(dx, y = NULL,alternative = "two.sided", mu = 0, paired = FALSE, var.equal = FALSE,conf.level = 0.95, R = 9999, symmetric = FALSE)
# alternative = c("two.sided", "less", "greater")
library(Bolstad)
bayes.t.test(dx, y = NULL,alternative = "two.sided", mu = 0, paired = FALSE,var.equal = TRUE, conf.level = 0.95,prior = "jeffreys", m = NULL,n0 = NULL,sig.med = NULL, kappa = 1,sigmaPrior = "chisq",nIter = 10000, nBurn = 1000)
# alternative = c("two.sided", "less", "greater")
# prior = c("jeffreys", "joint.conj")
library(BSDA)
SIGN.test(dx, md = 0, alternative = "two.sided",conf.level = 0.95)
library(DescTools)
SignTest(dx,mu = 0, alternative = "two.sided",conf.level = 0.95)
# one-sample Wilcoxon signed-rank test
wilcox.test(dx, alternative = "two.sided", mu = 0, exact = FALSE, correct = TRUE, conf.int = FALSE, conf.level = 0.95)
# alternative = c("two.sided", "less", "greater")
library(exactRankTests)
wilcox.exact(dx, mu = 0, alternative = "two.sided", exact = FALSE, conf.int = TRUE, conf.level = 0.95)
# alternative = c("two.sided", "less", "greater")
```
Measurements of the same constant by two methods gave normally distributed samples (n1 = 25 and n2 = 31) with standard deviations s1 = 0.523 and s2 = 0.363. Can the two methods be considered equally precise at the significance level 0.05?
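For reference, equality of precision is tested with $F = s_1^2/s_2^2$ on $(n_1-1,\,n_2-1)$ degrees of freedom; a minimal base-R cross-check of the `diffvariance.test()` call below (values copied from the assignment):

```{r}
s1 <- 0.523; s2 <- 0.363; n1 <- 25; n2 <- 31
F_stat <- s1^2 / s2^2; F_stat
# two-sided p-value of the F test for equality of variances
2 * min(pf(F_stat, n1 - 1, n2 - 1), 1 - pf(F_stat, n1 - 1, n2 - 1))
```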
```{r}
library(LearningStats)
diffvariance.test(sc1 = 0.523, sc2 = 0.363, n1 = 25, n2 = 31, alternative = "two.sided",alpha = 0.05, plot = TRUE)
diffvariance.CI(sc1 = 0.523, sc2 = 0.363, n1 = 25, n2 = 31, conf.level = 0.95)
```
A packaging machine produces packages of a given weight with a maximum standard deviation of 0.4 g. From a test sample of 10 packages, a sample standard deviation of 0.5 g was calculated. Decide (at the 5% significance level) whether the machine has failed, i.e. whether the maximum standard deviation has been exceeded.
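This is a one-sided $\chi^2$ test of the variance, $\chi^2 = (n-1)s^2/\sigma_0^2$ with $n-1$ degrees of freedom; a minimal base-R sketch of what `variance.test()` evaluates below:

```{r}
n <- 10; s <- 0.5; sigma0 <- 0.4
chi2 <- (n - 1) * s^2 / sigma0^2; chi2
# one-sided (greater) p-value: has the maximum standard deviation been exceeded?
1 - pchisq(chi2, df = n - 1)
```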
```{r}
n = 10
sig = 0.4
ss = 0.5
library(LearningStats)
variance.test(x = NULL, s = NULL, sc = ss, smu = NULL, mu = NULL,
n = n, sigma02= sig^2, alternative = "greater", alpha = 0.05,
plot = TRUE, lwd = 1)
```
Blood lead levels of 33 children whose parents worked in a lead factory and of 33 control children from the same neighbourhood.
```{r}
library(PairedData)
data(BloodLead)
xe = BloodLead[,2]
xc = BloodLead[,3]
library(PASWR2)
eda((xe-xc), trim = 0, dec = 3)
library(s20x)
normcheck((xe-xc))
library(PairedData)
boxplot(xc,xe)
plot(xc,xe,xlim=c(0,80),ylim=c(0,80))
abline(0,1)
summary(paired(xe, xc),tr=0.2)
effect.size(paired(xe, xc),tr=0.2)
slidingchart(paired(xe, xc))
# Cohen d (standardized mean difference)
mean(xe-xc)/sd(xe-xc)
library(lsr)
d = cohensD(xe, xc,method = "paired"); d
library(effsize)
cohen.d(xe, xc, paired = TRUE)
cor(xe, xc,method = "pearson")
cor(xe, xc,method = "spearman")
# t-test
t.test(xe, xc, paired = TRUE, alternative = "two.sided")
# t.test(elem ~ lom, data = data, paired = TRUE, alternative = "two.sided")
# Bootstrap t-Test
library(MKinfer)
boot.t.test(xe, xc, alternative = "two.sided",mu = 0, paired = TRUE, var.equal = FALSE, conf.level = 0.95, R = 9999, symmetric = FALSE)
# alternative = c("two.sided", "less", "greater")
library(CarletonStats)
bootPaired(xe, xc, conf.level = 0.95, B = 10000,plot.hist = TRUE, plot.qq = FALSE, legend.loc = "topright",x.name = deparse(substitute(x)), y.name = deparse(substitute(y)))
# Yuen's trimmed mean test
library(PairedData)
yuen.t.test(xe, xc, tr = 0.2, alternative = "two.sided", mu = 0, paired = TRUE, conf.level = 0.95)
# alternative = c("two.sided", "less", "greater")
# Chen's paired-sample t-test for skewed distributions
library(EnvStats)
chenTTest(xe, xc , alternative = "greater", mu = 0, paired = TRUE, conf.level = 0.95, ci.method = "z")
# alternative = c("less", "greater")
# ci.method = "z", "t"
library(Bolstad)
bayes.t.test(xe, xc,alternative = "two.sided", mu = 0, paired = TRUE,var.equal = TRUE, conf.level = 0.95,prior = "jeffreys", m = NULL,n0 = NULL,sig.med = NULL, kappa = 1,sigmaPrior = "chisq",nIter = 10000, nBurn = 1000)
# alternative = c("two.sided", "less", "greater")
# prior = c("jeffreys", "joint.conj"),
## Wilcoxon paired (signed-rank) test
wilcox.test(xe, xc, paired = TRUE, alternative = "two.sided", conf.int = TRUE)
library(PairedData)
wilcox.test(paired(xe, xc))
## Paired median (sign) test
library(EnvStats)
signTest(xe, xc, alternative = "two.sided", mu = 0, paired = TRUE, conf.level = 0.95)
# Permutation test for paired data
library(CarletonStats)
permTestPaired(xe, xc, B = 9999,alternative = "two.sided", plot.hist = TRUE,legend.loc = "topright", plot.qq = FALSE,x.name = deparse(substitute(x)), y.name = deparse(substitute(y)))
plot((xe+xc)/2, (xe-xc))
```
The concentration of Fe3+ (in %) in the preparation "Ferrum citricum ammoniatum viride" is determined by titrating the iodine (released by oxidation of KI by ferric ions) with thiosulfate against a starch indicator. One part of the samples was titrated immediately and the other part 30 minutes after adding KI. Do the results of the two procedures differ at the significance level 0.05?
```{r}
xih = c(13.29, 13.36, 13.32, 13.53, 13.56, 13.43, 13.30, 13.43)
x30 = c(13.86, 13.99, 13.88, 13.91, 13.89, 13.94, 13.80, 13.89)
fe3 = list(xih,x30)
names(fe3) = c("Fe hned", "Fe 30m")
library(reshape2)
fe = setNames(melt(fe3), c('koncentrace','metoda'))
library(lattice)
bwplot(koncentrace~metoda, data=fe)
hist(xih,xlim=c(13,14),col='skyblue',border=T)
hist(x30,add=T,col=scales::alpha('red',.5),border=T)
library(PASWR2)
eda((xih-x30), trim = 0, dec = 3)
library(s20x)
normcheck((xih-x30))
library(PairedData)
boxplot(xih,x30)
plot(xih,x30,xlim=c(13,14),ylim=c(13,14))
abline(0,1)
summary(paired(xih, x30),tr=0.2)
effect.size(paired(xih, x30),tr=0.2)
slidingchart(paired(xih, x30))
# Cohen d (standardized mean difference)
mean(xih-x30)/sd(xih-x30)
library(lsr)
d = cohensD(xih, x30,method = "paired"); d
library(effsize)
cohen.d(xih, x30, paired = TRUE)
cor(xih, x30,method = "pearson")
cor(xih, x30,method = "spearman")
# t-test
t.test(xih, x30, paired = TRUE, alternative = "two.sided")
# t.test(elem ~ lom, data = data, paired = TRUE, alternative = "two.sided")
# Bootstrap t-Test
library(MKinfer)
boot.t.test(xih, x30, alternative = "two.sided",mu = 0, paired = TRUE, var.equal = FALSE, conf.level = 0.95, R = 9999, symmetric = FALSE)
# alternative = c("two.sided", "less", "greater")
library(CarletonStats)
bootPaired(xih, x30, conf.level = 0.95, B = 10000,plot.hist = TRUE, plot.qq = FALSE, legend.loc = "topright",x.name = deparse(substitute(x)), y.name = deparse(substitute(y)))
# Yuen's trimmed mean test
library(PairedData)
yuen.t.test(xih, x30, tr = 0.2, alternative = "two.sided", mu = 0, paired = TRUE, conf.level = 0.95)
# alternative = c("two.sided", "less", "greater")
# Chen's paired-sample t-test for skewed distributions
library(EnvStats)
chenTTest(xih, x30 , alternative = "greater", mu = 0, paired = TRUE, conf.level = 0.95, ci.method = "z")
# alternative = c("less", "greater")
# ci.method = "z", "t"
library(Bolstad)
bayes.t.test(xih, x30,alternative = "two.sided", mu = 0, paired = TRUE,var.equal = TRUE, conf.level = 0.95,prior = "jeffreys", m = NULL,n0 = NULL,sig.med = NULL, kappa = 1,sigmaPrior = "chisq",nIter = 10000, nBurn = 1000)
# alternative = c("two.sided", "less", "greater")
# prior = c("jeffreys", "joint.conj"),
## Wilcoxon paired (signed-rank) test
wilcox.test(xih, x30, paired = TRUE, alternative = "two.sided", conf.int = TRUE)
library(PairedData)
wilcox.test(paired(xih, x30))
## Paired median (sign) test
library(EnvStats)
signTest(xih, x30, alternative = "two.sided", mu = 0, paired = TRUE, conf.level = 0.95)
# Permutation test for paired data
library(CarletonStats)
permTestPaired(xih, x30, B = 9999,alternative = "two.sided", plot.hist = TRUE,legend.loc = "topright", plot.qq = FALSE,x.name = deparse(substitute(x)), y.name = deparse(substitute(y)))
plot((xih+x30)/2, (xih-x30))
library(blandr)
bas2 = blandr.statistics (xih,x30, sig.level=0.95)
bas2$bias
bas2$biasUpperCI
bas2$biasLowerCI
bas2$regression.equation
cor(bas2$means,bas2$differences)^2 # R^2
blandr.plot.qq(bas2)
blandr.display.and.draw(method1=xih, method2=x30, plotter = "ggplot",method1name = "Ihned", method2name = "30 min.",plotTitle = "Bland-Altman plot for comparison of 2 methods",sig.level = 0.95, annotate = TRUE, ciDisplay = TRUE,ciShading = TRUE, normalLow = FALSE, normalHigh = FALSE,lowest_y_axis = FALSE, highest_y_axis = FALSE, point_size = 2)
```