##################################
# Loading R libraries
##################################
library(moments)
library(car)
library(multcomp)
library(effects)
library(psych)
library(ggplot2)
library(dplyr)
library(ggpubr)
library(rstatix)
library(ggfortify)
library(trend)
library(tidyr) # provides gather() used for reshaping below
##################################
# Defining a range of possible hypothesized values for mu_Response
##################################
mu_Response=seq(2.0,4.0,0.2)
##################################
# Defining a fixed hypothesized value for mu_Reference
##################################
mu_Reference=1.5
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=1
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(sd*(qnorm(1-alpha/2)+qnorm(1-beta_1))/(mu_Response-mu_Reference))^2)
## [1] 24.6882678 12.5960550 7.6198357 5.1008818 3.6521106 2.7431409
## [7] 2.1356633 1.7097138 1.3995617 1.1667423 0.9875307
ceiling(n_Beta30)
## [1] 25 13 8 6 4 3 3 2 2 2 1
z_Beta30=(mu_Response-mu_Reference)/(sd/sqrt(n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha/2))+pnorm(-z_Beta30-qnorm(1-alpha/2)))
## [1] 0.7000044 0.7000044 0.7000044 0.7000044 0.7000044 0.7000044 0.7000044
## [8] 0.7000044 0.7000044 0.7000044 0.7000044
beta_2=beta[2]
(n_Beta20=(sd*(qnorm(1-alpha/2)+qnorm(1-beta_2))/(mu_Response-mu_Reference))^2)
## [1] 31.395519 16.018122 9.689975 6.486677 4.644308 3.488391 2.715875
## [8] 2.174205 1.779791 1.483720 1.255821
ceiling(n_Beta20)
## [1] 32 17 10 7 5 4 3 3 2 2 2
z_Beta20=(mu_Response-mu_Reference)/(sd/sqrt(n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha/2))+pnorm(-z_Beta20-qnorm(1-alpha/2)))
## [1] 0.800001 0.800001 0.800001 0.800001 0.800001 0.800001 0.800001 0.800001
## [9] 0.800001 0.800001 0.800001
beta_3=beta[3]
(n_Beta10=(sd*(qnorm(1-alpha/2)+qnorm(1-beta_3))/(mu_Response-mu_Reference))^2)
## [1] 42.029692 21.443721 12.972127 8.683821 6.217410 4.669966 3.635787
## [8] 2.910644 2.382636 1.986280 1.681188
ceiling(n_Beta10)
## [1] 43 22 13 9 7 5 4 3 3 2 2
z_Beta10=(mu_Response-mu_Reference)/(sd/sqrt(n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha/2))+pnorm(-z_Beta10-qnorm(1-alpha/2)))
## [1] 0.9000001 0.9000001 0.9000001 0.9000001 0.9000001 0.9000001 0.9000001
## [8] 0.9000001 0.9000001 0.9000001 0.9000001
OneSampleDesign.Equality <- as.data.frame(cbind(mu_Response,n_Beta30,n_Beta20,n_Beta10))
names(OneSampleDesign.Equality) <- c("Posttreatment.Mean.Bone.Density",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
OneSampleDesign.Equality.Reshaped <- gather(OneSampleDesign.Equality,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
OneSampleDesign.Equality.Reshaped$Label <- rep("OSD_EQUALITY",nrow(OneSampleDesign.Equality.Reshaped))
OneSampleDesign.Equality.Reshaped
## Posttreatment.Mean.Bone.Density Power Sample.Size Label
## 1 2.0 Power=70% 24.6882678 OSD_EQUALITY
## 2 2.2 Power=70% 12.5960550 OSD_EQUALITY
## 3 2.4 Power=70% 7.6198357 OSD_EQUALITY
## 4 2.6 Power=70% 5.1008818 OSD_EQUALITY
## 5 2.8 Power=70% 3.6521106 OSD_EQUALITY
## 6 3.0 Power=70% 2.7431409 OSD_EQUALITY
## 7 3.2 Power=70% 2.1356633 OSD_EQUALITY
## 8 3.4 Power=70% 1.7097138 OSD_EQUALITY
## 9 3.6 Power=70% 1.3995617 OSD_EQUALITY
## 10 3.8 Power=70% 1.1667423 OSD_EQUALITY
## 11 4.0 Power=70% 0.9875307 OSD_EQUALITY
## 12 2.0 Power=80% 31.3955189 OSD_EQUALITY
## 13 2.2 Power=80% 16.0181219 OSD_EQUALITY
## 14 2.4 Power=80% 9.6899750 OSD_EQUALITY
## 15 2.6 Power=80% 6.4866775 OSD_EQUALITY
## 16 2.8 Power=80% 4.6443075 OSD_EQUALITY
## 17 3.0 Power=80% 3.4883910 OSD_EQUALITY
## 18 3.2 Power=80% 2.7158753 OSD_EQUALITY
## 19 3.4 Power=80% 2.1742049 OSD_EQUALITY
## 20 3.6 Power=80% 1.7797913 OSD_EQUALITY
## 21 3.8 Power=80% 1.4837202 OSD_EQUALITY
## 22 4.0 Power=80% 1.2558208 OSD_EQUALITY
## 23 2.0 Power=90% 42.0296922 OSD_EQUALITY
## 24 2.2 Power=90% 21.4437205 OSD_EQUALITY
## 25 2.4 Power=90% 12.9721272 OSD_EQUALITY
## 26 2.6 Power=90% 8.6838207 OSD_EQUALITY
## 27 2.8 Power=90% 6.2174101 OSD_EQUALITY
## 28 3.0 Power=90% 4.6699658 OSD_EQUALITY
## 29 3.2 Power=90% 3.6357865 OSD_EQUALITY
## 30 3.4 Power=90% 2.9106435 OSD_EQUALITY
## 31 3.6 Power=90% 2.3826356 OSD_EQUALITY
## 32 3.8 Power=90% 1.9862804 OSD_EQUALITY
## 33 4.0 Power=90% 1.6811877 OSD_EQUALITY
##################################
# Plotting the sample sizes and power curves
##################################
(OneSampleDesign.Equality.PowerAnalysis <- ggplot(OneSampleDesign.Equality.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,
by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("One-Sample Design : Test for Equality"))
##################################
# Defining a range of possible hypothesized values for mu_Response
##################################
mu_Response=seq(2.0,4.0,0.2)
##################################
# Defining a fixed hypothesized value for mu_Reference
##################################
mu_Reference=1.5
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=1
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining the non-inferiority delta
##################################
delta=-0.50
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(sd*(qnorm(1-alpha)+qnorm(1-beta_1))/(mu_Response-mu_Reference-delta))^2)
## [1] 4.7056635 3.2678219 2.4008487 1.8381498 1.4523653 1.1764159 0.9722445
## [8] 0.8169555 0.6961041 0.6002122 0.5228515
ceiling(n_Beta30)
## [1] 5 4 3 2 2 2 1 1 1 1 1
z_Beta30=(mu_Response-mu_Reference-delta)/(sd/sqrt(n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha))+pnorm(-z_Beta30-qnorm(1-alpha)))
## [1] 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683
## [8] 0.7000683 0.7000683 0.7000683 0.7000683
beta_2=beta[2]
(n_Beta20=(sd*(qnorm(1-alpha)+qnorm(1-beta_2))/(mu_Response-mu_Reference-delta))^2)
## [1] 6.1825572 4.2934425 3.1543659 2.4150614 1.9081967 1.5456393 1.2773879
## [8] 1.0733606 0.9145795 0.7885915 0.6869508
ceiling(n_Beta20)
## [1] 7 5 4 3 2 2 2 2 1 1 1
z_Beta20=(mu_Response-mu_Reference-delta)/(sd/sqrt(n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha))+pnorm(-z_Beta20-qnorm(1-alpha)))
## [1] 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018
## [9] 0.800018 0.800018 0.800018
beta_3=beta[3]
(n_Beta10=(sd*(qnorm(1-alpha)+qnorm(1-beta_3))/(mu_Response-mu_Reference-delta))^2)
## [1] 8.5638474 5.9471162 4.3693099 3.3452529 2.6431628 2.1409618 1.7693899
## [8] 1.4867791 1.2668413 1.0923275 0.9515386
ceiling(n_Beta10)
## [1] 9 6 5 4 3 3 2 2 2 2 1
z_Beta10=(mu_Response-mu_Reference-delta)/(sd/sqrt(n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha))+pnorm(-z_Beta10-qnorm(1-alpha)))
## [1] 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024
## [8] 0.9000024 0.9000024 0.9000024 0.9000024
OneSampleDesign.NonInferiority <- as.data.frame(cbind(mu_Response,n_Beta30,n_Beta20,n_Beta10))
names(OneSampleDesign.NonInferiority) <- c("Posttreatment.Mean.Bone.Density",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
OneSampleDesign.NonInferiority.Reshaped <- gather(OneSampleDesign.NonInferiority,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
OneSampleDesign.NonInferiority.Reshaped$Label <- rep("OSD_NONINFERIORITY",nrow(OneSampleDesign.NonInferiority.Reshaped))
OneSampleDesign.NonInferiority.Reshaped
## Posttreatment.Mean.Bone.Density Power Sample.Size Label
## 1 2.0 Power=70% 4.7056635 OSD_NONINFERIORITY
## 2 2.2 Power=70% 3.2678219 OSD_NONINFERIORITY
## 3 2.4 Power=70% 2.4008487 OSD_NONINFERIORITY
## 4 2.6 Power=70% 1.8381498 OSD_NONINFERIORITY
## 5 2.8 Power=70% 1.4523653 OSD_NONINFERIORITY
## 6 3.0 Power=70% 1.1764159 OSD_NONINFERIORITY
## 7 3.2 Power=70% 0.9722445 OSD_NONINFERIORITY
## 8 3.4 Power=70% 0.8169555 OSD_NONINFERIORITY
## 9 3.6 Power=70% 0.6961041 OSD_NONINFERIORITY
## 10 3.8 Power=70% 0.6002122 OSD_NONINFERIORITY
## 11 4.0 Power=70% 0.5228515 OSD_NONINFERIORITY
## 12 2.0 Power=80% 6.1825572 OSD_NONINFERIORITY
## 13 2.2 Power=80% 4.2934425 OSD_NONINFERIORITY
## 14 2.4 Power=80% 3.1543659 OSD_NONINFERIORITY
## 15 2.6 Power=80% 2.4150614 OSD_NONINFERIORITY
## 16 2.8 Power=80% 1.9081967 OSD_NONINFERIORITY
## 17 3.0 Power=80% 1.5456393 OSD_NONINFERIORITY
## 18 3.2 Power=80% 1.2773879 OSD_NONINFERIORITY
## 19 3.4 Power=80% 1.0733606 OSD_NONINFERIORITY
## 20 3.6 Power=80% 0.9145795 OSD_NONINFERIORITY
## 21 3.8 Power=80% 0.7885915 OSD_NONINFERIORITY
## 22 4.0 Power=80% 0.6869508 OSD_NONINFERIORITY
## 23 2.0 Power=90% 8.5638474 OSD_NONINFERIORITY
## 24 2.2 Power=90% 5.9471162 OSD_NONINFERIORITY
## 25 2.4 Power=90% 4.3693099 OSD_NONINFERIORITY
## 26 2.6 Power=90% 3.3452529 OSD_NONINFERIORITY
## 27 2.8 Power=90% 2.6431628 OSD_NONINFERIORITY
## 28 3.0 Power=90% 2.1409618 OSD_NONINFERIORITY
## 29 3.2 Power=90% 1.7693899 OSD_NONINFERIORITY
## 30 3.4 Power=90% 1.4867791 OSD_NONINFERIORITY
## 31 3.6 Power=90% 1.2668413 OSD_NONINFERIORITY
## 32 3.8 Power=90% 1.0923275 OSD_NONINFERIORITY
## 33 4.0 Power=90% 0.9515386 OSD_NONINFERIORITY
##################################
# Plotting the sample sizes and power curves
##################################
(OneSampleDesign.NonInferiority.PowerAnalysis <- ggplot(OneSampleDesign.NonInferiority.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,
by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("One-Sample Design : Test for Non-Inferiority"))
##################################
# Defining a range of possible hypothesized values for mu_Response
##################################
mu_Response=seq(2.0,4.0,0.2)
##################################
# Defining a fixed hypothesized value for mu_Reference
##################################
mu_Reference=1.5
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=1
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining the superiority delta
##################################
delta=0.10
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(sd*(qnorm(1-alpha)+qnorm(1-beta_1))/(mu_Response-mu_Reference-delta))^2)
## [1] 29.4103970 13.0712876 7.3525993 4.7056635 3.2678219 2.4008487
## [7] 1.8381498 1.4523653 1.1764159 0.9722445 0.8169555
ceiling(n_Beta30)
## [1] 30 14 8 5 4 3 2 2 2 1 1
z_Beta30=(mu_Response-mu_Reference-delta)/(sd/sqrt(n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha))+pnorm(-z_Beta30-qnorm(1-alpha)))
## [1] 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683
## [8] 0.7000683 0.7000683 0.7000683 0.7000683
beta_2=beta[2]
(n_Beta20=(sd*(qnorm(1-alpha)+qnorm(1-beta_2))/(mu_Response-mu_Reference-delta))^2)
## [1] 38.640983 17.173770 9.660246 6.182557 4.293443 3.154366 2.415061
## [8] 1.908197 1.545639 1.277388 1.073361
ceiling(n_Beta20)
## [1] 39 18 10 7 5 4 3 2 2 2 2
z_Beta20=(mu_Response-mu_Reference-delta)/(sd/sqrt(n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha))+pnorm(-z_Beta20-qnorm(1-alpha)))
## [1] 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018
## [9] 0.800018 0.800018 0.800018
beta_3=beta[3]
(n_Beta10=(sd*(qnorm(1-alpha)+qnorm(1-beta_3))/(mu_Response-mu_Reference-delta))^2)
## [1] 53.524046 23.788465 13.381011 8.563847 5.947116 4.369310 3.345253
## [8] 2.643163 2.140962 1.769390 1.486779
ceiling(n_Beta10)
## [1] 54 24 14 9 6 5 4 3 3 2 2
z_Beta10=(mu_Response-mu_Reference-delta)/(sd/sqrt(n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha))+pnorm(-z_Beta10-qnorm(1-alpha)))
## [1] 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024
## [8] 0.9000024 0.9000024 0.9000024 0.9000024
OneSampleDesign.Superiority <- as.data.frame(cbind(mu_Response,n_Beta30,n_Beta20,n_Beta10))
names(OneSampleDesign.Superiority) <- c("Posttreatment.Mean.Bone.Density",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
OneSampleDesign.Superiority.Reshaped <- gather(OneSampleDesign.Superiority,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
OneSampleDesign.Superiority.Reshaped$Label <- rep("OSD_SUPERIORITY",nrow(OneSampleDesign.Superiority.Reshaped))
OneSampleDesign.Superiority.Reshaped
## Posttreatment.Mean.Bone.Density Power Sample.Size Label
## 1 2.0 Power=70% 29.4103970 OSD_SUPERIORITY
## 2 2.2 Power=70% 13.0712876 OSD_SUPERIORITY
## 3 2.4 Power=70% 7.3525993 OSD_SUPERIORITY
## 4 2.6 Power=70% 4.7056635 OSD_SUPERIORITY
## 5 2.8 Power=70% 3.2678219 OSD_SUPERIORITY
## 6 3.0 Power=70% 2.4008487 OSD_SUPERIORITY
## 7 3.2 Power=70% 1.8381498 OSD_SUPERIORITY
## 8 3.4 Power=70% 1.4523653 OSD_SUPERIORITY
## 9 3.6 Power=70% 1.1764159 OSD_SUPERIORITY
## 10 3.8 Power=70% 0.9722445 OSD_SUPERIORITY
## 11 4.0 Power=70% 0.8169555 OSD_SUPERIORITY
## 12 2.0 Power=80% 38.6409827 OSD_SUPERIORITY
## 13 2.2 Power=80% 17.1737701 OSD_SUPERIORITY
## 14 2.4 Power=80% 9.6602457 OSD_SUPERIORITY
## 15 2.6 Power=80% 6.1825572 OSD_SUPERIORITY
## 16 2.8 Power=80% 4.2934425 OSD_SUPERIORITY
## 17 3.0 Power=80% 3.1543659 OSD_SUPERIORITY
## 18 3.2 Power=80% 2.4150614 OSD_SUPERIORITY
## 19 3.4 Power=80% 1.9081967 OSD_SUPERIORITY
## 20 3.6 Power=80% 1.5456393 OSD_SUPERIORITY
## 21 3.8 Power=80% 1.2773879 OSD_SUPERIORITY
## 22 4.0 Power=80% 1.0733606 OSD_SUPERIORITY
## 23 2.0 Power=90% 53.5240459 OSD_SUPERIORITY
## 24 2.2 Power=90% 23.7884649 OSD_SUPERIORITY
## 25 2.4 Power=90% 13.3810115 OSD_SUPERIORITY
## 26 2.6 Power=90% 8.5638474 OSD_SUPERIORITY
## 27 2.8 Power=90% 5.9471162 OSD_SUPERIORITY
## 28 3.0 Power=90% 4.3693099 OSD_SUPERIORITY
## 29 3.2 Power=90% 3.3452529 OSD_SUPERIORITY
## 30 3.4 Power=90% 2.6431628 OSD_SUPERIORITY
## 31 3.6 Power=90% 2.1409618 OSD_SUPERIORITY
## 32 3.8 Power=90% 1.7693899 OSD_SUPERIORITY
## 33 4.0 Power=90% 1.4867791 OSD_SUPERIORITY
##################################
# Plotting the sample sizes and power curves
##################################
(OneSampleDesign.Superiority.PowerAnalysis <- ggplot(OneSampleDesign.Superiority.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,
by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("One-Sample Design : Test for Superiority"))
##################################
# Defining a range of possible hypothesized values for mu_Response
##################################
mu_Response=seq(2.0,4.0,0.2)
##################################
# Defining a fixed hypothesized value for mu_Reference
##################################
mu_Reference=1.5
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=1
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining the equivalence delta
##################################
delta=0.10
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(sd*(qnorm(1-alpha)+qnorm(1-beta_1/2))/(delta-abs(mu_Response-mu_Reference)))^2)
## [1] 44.933125 19.970278 11.233281 7.189300 4.992569 3.668010 2.808320
## [8] 2.218920 1.797325 1.485393 1.248142
ceiling(n_Beta30)
## [1] 45 20 12 8 5 4 3 3 2 2 2
z_Beta30=(abs(mu_Response-mu_Reference)-delta)/(sd/sqrt(n_Beta30))
(Power_Beta30=2*(pnorm(z_Beta30-qnorm(1-alpha))+pnorm(-z_Beta30-qnorm(1-alpha)))-1)
## [1] 0.7000152 0.7000152 0.7000152 0.7000152 0.7000152 0.7000152 0.7000152
## [8] 0.7000152 0.7000152 0.7000152 0.7000152
beta_2=beta[2]
(n_Beta20=(sd*(qnorm(1-alpha)+qnorm(1-beta_2/2))/(delta-abs(mu_Response-mu_Reference)))^2)
## [1] 53.524046 23.788465 13.381011 8.563847 5.947116 4.369310 3.345253
## [8] 2.643163 2.140962 1.769390 1.486779
ceiling(n_Beta20)
## [1] 54 24 14 9 6 5 4 3 3 2 2
z_Beta20=(abs(mu_Response-mu_Reference)-delta)/(sd/sqrt(n_Beta20))
(Power_Beta20=2*(pnorm(z_Beta20-qnorm(1-alpha))+pnorm(-z_Beta20-qnorm(1-alpha)))-1)
## [1] 0.8000048 0.8000048 0.8000048 0.8000048 0.8000048 0.8000048 0.8000048
## [8] 0.8000048 0.8000048 0.8000048 0.8000048
(Power_Beta20=2*(pnorm(z_Beta20-qnorm(alpha)))-1)
## [1] 0.9999952 0.9999952 0.9999952 0.9999952 0.9999952 0.9999952 0.9999952
## [8] 0.9999952 0.9999952 0.9999952 0.9999952
beta_3=beta[3]
(n_Beta10=(sd*(qnorm(1-alpha)+qnorm(1-beta_3/2))/(delta-abs(mu_Response-mu_Reference)))^2)
## [1] 67.638586 30.061594 16.909647 10.822174 7.515398 5.521517 4.227412
## [8] 3.340177 2.705543 2.235986 1.878850
ceiling(n_Beta10)
## [1] 68 31 17 11 8 6 5 4 3 3 2
z_Beta10=(abs(mu_Response-mu_Reference)-delta)/(sd/sqrt(n_Beta10))
(Power_Beta10=2*(pnorm(z_Beta10-qnorm(1-alpha))+pnorm(-z_Beta10-qnorm(1-alpha)))-1)
## [1] 0.9000008 0.9000008 0.9000008 0.9000008 0.9000008 0.9000008 0.9000008
## [8] 0.9000008 0.9000008 0.9000008 0.9000008
OneSampleDesign.Equivalence <- as.data.frame(cbind(mu_Response,n_Beta30,n_Beta20,n_Beta10))
names(OneSampleDesign.Equivalence) <- c("Posttreatment.Mean.Bone.Density",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
OneSampleDesign.Equivalence.Reshaped <- gather(OneSampleDesign.Equivalence,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
OneSampleDesign.Equivalence.Reshaped$Label <- rep("OSD_EQUIVALENCE",nrow(OneSampleDesign.Equivalence.Reshaped))
OneSampleDesign.Equivalence.Reshaped
## Posttreatment.Mean.Bone.Density Power Sample.Size Label
## 1 2.0 Power=70% 44.933125 OSD_EQUIVALENCE
## 2 2.2 Power=70% 19.970278 OSD_EQUIVALENCE
## 3 2.4 Power=70% 11.233281 OSD_EQUIVALENCE
## 4 2.6 Power=70% 7.189300 OSD_EQUIVALENCE
## 5 2.8 Power=70% 4.992569 OSD_EQUIVALENCE
## 6 3.0 Power=70% 3.668010 OSD_EQUIVALENCE
## 7 3.2 Power=70% 2.808320 OSD_EQUIVALENCE
## 8 3.4 Power=70% 2.218920 OSD_EQUIVALENCE
## 9 3.6 Power=70% 1.797325 OSD_EQUIVALENCE
## 10 3.8 Power=70% 1.485393 OSD_EQUIVALENCE
## 11 4.0 Power=70% 1.248142 OSD_EQUIVALENCE
## 12 2.0 Power=80% 53.524046 OSD_EQUIVALENCE
## 13 2.2 Power=80% 23.788465 OSD_EQUIVALENCE
## 14 2.4 Power=80% 13.381011 OSD_EQUIVALENCE
## 15 2.6 Power=80% 8.563847 OSD_EQUIVALENCE
## 16 2.8 Power=80% 5.947116 OSD_EQUIVALENCE
## 17 3.0 Power=80% 4.369310 OSD_EQUIVALENCE
## 18 3.2 Power=80% 3.345253 OSD_EQUIVALENCE
## 19 3.4 Power=80% 2.643163 OSD_EQUIVALENCE
## 20 3.6 Power=80% 2.140962 OSD_EQUIVALENCE
## 21 3.8 Power=80% 1.769390 OSD_EQUIVALENCE
## 22 4.0 Power=80% 1.486779 OSD_EQUIVALENCE
## 23 2.0 Power=90% 67.638586 OSD_EQUIVALENCE
## 24 2.2 Power=90% 30.061594 OSD_EQUIVALENCE
## 25 2.4 Power=90% 16.909647 OSD_EQUIVALENCE
## 26 2.6 Power=90% 10.822174 OSD_EQUIVALENCE
## 27 2.8 Power=90% 7.515398 OSD_EQUIVALENCE
## 28 3.0 Power=90% 5.521517 OSD_EQUIVALENCE
## 29 3.2 Power=90% 4.227412 OSD_EQUIVALENCE
## 30 3.4 Power=90% 3.340177 OSD_EQUIVALENCE
## 31 3.6 Power=90% 2.705543 OSD_EQUIVALENCE
## 32 3.8 Power=90% 2.235986 OSD_EQUIVALENCE
## 33 4.0 Power=90% 1.878850 OSD_EQUIVALENCE
##################################
# Plotting the sample sizes and power curves
##################################
(OneSampleDesign.Equivalence.PowerAnalysis <- ggplot(OneSampleDesign.Equivalence.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,
by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("One-Sample Design : Test for Equivalence"))
##################################
# Consolidating the power analyses charts
##################################
OSD_EQUALITY.PowerAnalysis <- ggplot(OneSampleDesign.Equality.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
OSD_NONINFERIORITY.PowerAnalysis <- ggplot(OneSampleDesign.NonInferiority.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
OSD_SUPERIORITY.PowerAnalysis <- ggplot(OneSampleDesign.Superiority.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
OSD_EQUIVALENCE.PowerAnalysis <- ggplot(OneSampleDesign.Equivalence.Reshaped,
aes(x=Posttreatment.Mean.Bone.Density,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Post-Treatment Mean Bone Density",
limits=c(2,4),
breaks=seq(2,4,by=0.2)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
OSD_PowerAnalysis <- ggarrange(OSD_EQUALITY.PowerAnalysis,
OSD_NONINFERIORITY.PowerAnalysis,
OSD_SUPERIORITY.PowerAnalysis,
OSD_EQUIVALENCE.PowerAnalysis,ncol=2, nrow=2)
annotate_figure(OSD_PowerAnalysis,
top = text_grob("One-Sample Design Power Analysis by Statistical Hypothesis",
color = "black",
face = "bold",
size = 14))
##################################
# Defining a range of possible hypothesized values for epsilon
##################################
epsilon=seq(6,12,1)
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=10
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining a fixed hypothesized value for kappa
##################################
kappa=1.0
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(1+1/kappa)*(sd*(qnorm(1-alpha/2)+qnorm(1-beta_1))/(epsilon))^2)
## [1] 34.289261 25.192110 19.287709 15.239671 12.344134 10.201764 8.572315
ceiling(n_Beta30)
## [1] 35 26 20 16 13 11 9
z_Beta30=(epsilon)/(sd*sqrt((1+1/kappa)/n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha/2))+pnorm(-z_Beta30-qnorm(1-alpha/2)))
## [1] 0.7000044 0.7000044 0.7000044 0.7000044 0.7000044 0.7000044 0.7000044
beta_2=beta[2]
(n_Beta20=(1+1/kappa)*(sd*(qnorm(1-alpha/2)+qnorm(1-beta_2))/(epsilon))^2)
## [1] 43.60489 32.03624 24.52775 19.37995 15.69776 12.97335 10.90122
ceiling(n_Beta20)
## [1] 44 33 25 20 16 13 11
z_Beta20=(epsilon)/(sd*sqrt((1+1/kappa)/n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha/2))+pnorm(-z_Beta20-qnorm(1-alpha/2)))
## [1] 0.800001 0.800001 0.800001 0.800001 0.800001 0.800001 0.800001
beta_3=beta[3]
(n_Beta10=(1+1/kappa)*(sd*(qnorm(1-alpha/2)+qnorm(1-beta_3))/(epsilon))^2)
## [1] 58.37457 42.88744 32.83570 25.94425 21.01485 17.36764 14.59364
ceiling(n_Beta10)
## [1] 59 43 33 26 22 18 15
z_Beta10=(epsilon)/(sd*sqrt((1+1/kappa)/n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha/2))+pnorm(-z_Beta10-qnorm(1-alpha/2)))
## [1] 0.9000001 0.9000001 0.9000001 0.9000001 0.9000001 0.9000001 0.9000001
TwoSampleDesign.Equality <- as.data.frame(cbind(epsilon,n_Beta30,n_Beta20,n_Beta10))
names(TwoSampleDesign.Equality) <- c("Mean.LDL.Percent.Change",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
TwoSampleDesign.Equality.Reshaped <- gather(TwoSampleDesign.Equality,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
TwoSampleDesign.Equality.Reshaped$Label <- rep("TSD_EQUALITY",nrow(TwoSampleDesign.Equality.Reshaped))
TwoSampleDesign.Equality.Reshaped
## Mean.LDL.Percent.Change Power Sample.Size Label
## 1 6 Power=70% 34.289261 TSD_EQUALITY
## 2 7 Power=70% 25.192110 TSD_EQUALITY
## 3 8 Power=70% 19.287709 TSD_EQUALITY
## 4 9 Power=70% 15.239671 TSD_EQUALITY
## 5 10 Power=70% 12.344134 TSD_EQUALITY
## 6 11 Power=70% 10.201764 TSD_EQUALITY
## 7 12 Power=70% 8.572315 TSD_EQUALITY
## 8 6 Power=80% 43.604887 TSD_EQUALITY
## 9 7 Power=80% 32.036244 TSD_EQUALITY
## 10 8 Power=80% 24.527749 TSD_EQUALITY
## 11 9 Power=80% 19.379950 TSD_EQUALITY
## 12 10 Power=80% 15.697759 TSD_EQUALITY
## 13 11 Power=80% 12.973355 TSD_EQUALITY
## 14 12 Power=80% 10.901222 TSD_EQUALITY
## 15 6 Power=90% 58.374573 TSD_EQUALITY
## 16 7 Power=90% 42.887441 TSD_EQUALITY
## 17 8 Power=90% 32.835697 TSD_EQUALITY
## 18 9 Power=90% 25.944254 TSD_EQUALITY
## 19 10 Power=90% 21.014846 TSD_EQUALITY
## 20 11 Power=90% 17.367641 TSD_EQUALITY
## 21 12 Power=90% 14.593643 TSD_EQUALITY
##################################
# Plotting the sample sizes and power curves
##################################
(TwoSampleDesign.Equality.PowerAnalysis <- ggplot(TwoSampleDesign.Equality.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Two-Sample Design : Test for Equality"))
##################################
# Defining a range of possible hypothesized values for epsilon
##################################
epsilon=seq(6,12,1)
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=10
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining a fixed hypothesized value for kappa
##################################
kappa=1.0
##################################
# Defining the non-inferiority delta
##################################
delta=-5
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_1))/(epsilon-delta))^2)
## [1] 7.777956 6.535644 5.568833 4.801697 4.182812 3.676300 3.256515
ceiling(n_Beta30)
## [1] 8 7 6 5 5 4 4
z_Beta30=(epsilon-delta)/(sd*sqrt((1+1/kappa)/n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha))+pnorm(-z_Beta30-qnorm(1-alpha)))
## [1] 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683
beta_2=beta[2]
(n_Beta20=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_2))/(epsilon-delta))^2)
## [1] 10.219103 8.586885 7.316636 6.308732 5.495606 4.830123 4.278586
ceiling(n_Beta20)
## [1] 11 9 8 7 6 5 5
z_Beta20=(epsilon-delta)/(sd*sqrt((1+1/kappa)/n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha))+pnorm(-z_Beta20-qnorm(1-alpha)))
## [1] 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018
beta_3=beta[3]
(n_Beta10=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_3))/(epsilon-delta))^2)
## [1] 14.155120 11.894232 10.134731 8.738620 7.612309 6.690506 5.926538
ceiling(n_Beta10)
## [1] 15 12 11 9 8 7 6
z_Beta10=(epsilon-delta)/(sd*sqrt((1+1/kappa)/n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha))+pnorm(-z_Beta10-qnorm(1-alpha)))
## [1] 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024
TwoSampleDesign.NonInferiority <- as.data.frame(cbind(epsilon,n_Beta30,n_Beta20,n_Beta10))
names(TwoSampleDesign.NonInferiority) <- c("Mean.LDL.Percent.Change",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
TwoSampleDesign.NonInferiority.Reshaped <- gather(TwoSampleDesign.NonInferiority,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
TwoSampleDesign.NonInferiority.Reshaped$Label <- rep("TSD_NONINFERIORITY",nrow(TwoSampleDesign.NonInferiority.Reshaped))
TwoSampleDesign.NonInferiority.Reshaped
## Mean.LDL.Percent.Change Power Sample.Size Label
## 1 6 Power=70% 7.777956 TSD_NONINFERIORITY
## 2 7 Power=70% 6.535644 TSD_NONINFERIORITY
## 3 8 Power=70% 5.568833 TSD_NONINFERIORITY
## 4 9 Power=70% 4.801697 TSD_NONINFERIORITY
## 5 10 Power=70% 4.182812 TSD_NONINFERIORITY
## 6 11 Power=70% 3.676300 TSD_NONINFERIORITY
## 7 12 Power=70% 3.256515 TSD_NONINFERIORITY
## 8 6 Power=80% 10.219103 TSD_NONINFERIORITY
## 9 7 Power=80% 8.586885 TSD_NONINFERIORITY
## 10 8 Power=80% 7.316636 TSD_NONINFERIORITY
## 11 9 Power=80% 6.308732 TSD_NONINFERIORITY
## 12 10 Power=80% 5.495606 TSD_NONINFERIORITY
## 13 11 Power=80% 4.830123 TSD_NONINFERIORITY
## 14 12 Power=80% 4.278586 TSD_NONINFERIORITY
## 15 6 Power=90% 14.155120 TSD_NONINFERIORITY
## 16 7 Power=90% 11.894232 TSD_NONINFERIORITY
## 17 8 Power=90% 10.134731 TSD_NONINFERIORITY
## 18 9 Power=90% 8.738620 TSD_NONINFERIORITY
## 19 10 Power=90% 7.612309 TSD_NONINFERIORITY
## 20 11 Power=90% 6.690506 TSD_NONINFERIORITY
## 21 12 Power=90% 5.926538 TSD_NONINFERIORITY
##################################
# Plotting the sample sizes and power curves
##################################
(TwoSampleDesign.NonInferiority.PowerAnalysis <- ggplot(TwoSampleDesign.NonInferiority.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Two-Sample Design : Test for Non-Inferiority"))
##################################
# Defining a range of possible hypothesized values for epsilon
##################################
epsilon=seq(6,12,1)
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=10
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining a fixed hypothesized value for kappa
##################################
kappa=1.0
##################################
# Defining the superiority delta
##################################
delta=1
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_1))/(epsilon-delta))^2)
## [1] 37.645308 26.142575 19.206790 14.705199 11.618922 9.411327 7.777956
ceiling(n_Beta30)
## [1] 38 27 20 15 12 10 8
z_Beta30=(epsilon-delta)/(sd*sqrt((1+1/kappa)/n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha))+pnorm(-z_Beta30-qnorm(1-alpha)))
## [1] 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683 0.7000683
beta_2=beta[2]
(n_Beta20=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_2))/(epsilon-delta))^2)
## [1] 49.46046 34.34754 25.23493 19.32049 15.26557 12.36511 10.21910
ceiling(n_Beta20)
## [1] 50 35 26 20 16 13 11
z_Beta20=(epsilon-delta)/(sd*sqrt((1+1/kappa)/n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha))+pnorm(-z_Beta20-qnorm(1-alpha)))
## [1] 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018 0.800018
beta_3=beta[3]
(n_Beta10=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_3))/(epsilon-delta))^2)
## [1] 68.51078 47.57693 34.95448 26.76202 21.14530 17.12769 14.15512
ceiling(n_Beta10)
## [1] 69 48 35 27 22 18 15
z_Beta10=(epsilon-delta)/(sd*sqrt((1+1/kappa)/n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha))+pnorm(-z_Beta10-qnorm(1-alpha)))
## [1] 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024 0.9000024
TwoSampleDesign.Superiority <- as.data.frame(cbind(epsilon,n_Beta30,n_Beta20,n_Beta10))
names(TwoSampleDesign.Superiority) <- c("Mean.LDL.Percent.Change",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
TwoSampleDesign.Superiority.Reshaped <- gather(TwoSampleDesign.Superiority,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
TwoSampleDesign.Superiority.Reshaped$Label <- rep("TSD_SUPERIORITY",nrow(TwoSampleDesign.Superiority.Reshaped))
TwoSampleDesign.Superiority.Reshaped
## Mean.LDL.Percent.Change Power Sample.Size Label
## 1 6 Power=70% 37.645308 TSD_SUPERIORITY
## 2 7 Power=70% 26.142575 TSD_SUPERIORITY
## 3 8 Power=70% 19.206790 TSD_SUPERIORITY
## 4 9 Power=70% 14.705199 TSD_SUPERIORITY
## 5 10 Power=70% 11.618922 TSD_SUPERIORITY
## 6 11 Power=70% 9.411327 TSD_SUPERIORITY
## 7 12 Power=70% 7.777956 TSD_SUPERIORITY
## 8 6 Power=80% 49.460458 TSD_SUPERIORITY
## 9 7 Power=80% 34.347540 TSD_SUPERIORITY
## 10 8 Power=80% 25.234927 TSD_SUPERIORITY
## 11 9 Power=80% 19.320491 TSD_SUPERIORITY
## 12 10 Power=80% 15.265573 TSD_SUPERIORITY
## 13 11 Power=80% 12.365114 TSD_SUPERIORITY
## 14 12 Power=80% 10.219103 TSD_SUPERIORITY
## 15 6 Power=90% 68.510779 TSD_SUPERIORITY
## 16 7 Power=90% 47.576930 TSD_SUPERIORITY
## 17 8 Power=90% 34.954479 TSD_SUPERIORITY
## 18 9 Power=90% 26.762023 TSD_SUPERIORITY
## 19 10 Power=90% 21.145302 TSD_SUPERIORITY
## 20 11 Power=90% 17.127695 TSD_SUPERIORITY
## 21 12 Power=90% 14.155120 TSD_SUPERIORITY
##################################
# Plotting the sample sizes and power curves
##################################
(TwoSampleDesign.Superiority.PowerAnalysis <- ggplot(TwoSampleDesign.Superiority.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Two-Sample Design : Test for Superiority"))
##################################
# Defining a range of possible hypothesized values for epsilon
##################################
epsilon=seq(6,12,1)
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=10
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining a fixed hypothesized value for kappa
##################################
kappa=1.0
##################################
# Defining the equivalence delta
##################################
delta=1
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_1/2))/(abs(epsilon)-delta))^2)
## [1] 57.51440 39.94056 29.34408 22.46656 17.75136 14.37860 11.88314
ceiling(n_Beta30)
## [1] 58 40 30 23 18 15 12
z_Beta30=(abs(epsilon)-delta)/(sd*sqrt((1+1/kappa)/n_Beta30))
(Power_Beta30=2*(pnorm(z_Beta30-qnorm(1-alpha))+pnorm(-z_Beta30-qnorm(1-alpha)))-1)
## [1] 0.7000152 0.7000152 0.7000152 0.7000152 0.7000152 0.7000152 0.7000152
beta_2=beta[2]
(n_Beta20=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_2/2))/(abs(epsilon)-delta))^2)
## [1] 68.51078 47.57693 34.95448 26.76202 21.14530 17.12769 14.15512
ceiling(n_Beta20)
## [1] 69 48 35 27 22 18 15
z_Beta20=(abs(epsilon)-delta)/(sd*sqrt((1+1/kappa)/n_Beta20))
(Power_Beta20=2*(pnorm(z_Beta20-qnorm(1-alpha))+pnorm(-z_Beta20-qnorm(1-alpha)))-1)
## [1] 0.8000048 0.8000048 0.8000048 0.8000048 0.8000048 0.8000048 0.8000048
beta_3=beta[3]
(n_Beta10=(1+1/kappa)*(sd*(qnorm(1-alpha)+qnorm(1-beta_3/2))/(abs(epsilon)-delta))^2)
## [1] 86.57739 60.12319 44.17214 33.81929 26.72142 21.64435 17.88789
ceiling(n_Beta10)
## [1] 87 61 45 34 27 22 18
z_Beta10=(abs(epsilon)-delta)/(sd*sqrt((1+1/kappa)/n_Beta10))
(Power_Beta10=2*(pnorm(z_Beta10-qnorm(1-alpha))+pnorm(-z_Beta10-qnorm(1-alpha)))-1)
## [1] 0.9000008 0.9000008 0.9000008 0.9000008 0.9000008 0.9000008 0.9000008
TwoSampleDesign.Equivalence <- as.data.frame(cbind(epsilon,n_Beta30,n_Beta20,n_Beta10))
names(TwoSampleDesign.Equivalence) <- c("Mean.LDL.Percent.Change",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
TwoSampleDesign.Equivalence.Reshaped <- gather(TwoSampleDesign.Equivalence,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
TwoSampleDesign.Equivalence.Reshaped$Label <- rep("TSD_EQUIVALENCE",nrow(TwoSampleDesign.Equivalence.Reshaped))
TwoSampleDesign.Equivalence.Reshaped
## Mean.LDL.Percent.Change Power Sample.Size Label
## 1 6 Power=70% 57.51440 TSD_EQUIVALENCE
## 2 7 Power=70% 39.94056 TSD_EQUIVALENCE
## 3 8 Power=70% 29.34408 TSD_EQUIVALENCE
## 4 9 Power=70% 22.46656 TSD_EQUIVALENCE
## 5 10 Power=70% 17.75136 TSD_EQUIVALENCE
## 6 11 Power=70% 14.37860 TSD_EQUIVALENCE
## 7 12 Power=70% 11.88314 TSD_EQUIVALENCE
## 8 6 Power=80% 68.51078 TSD_EQUIVALENCE
## 9 7 Power=80% 47.57693 TSD_EQUIVALENCE
## 10 8 Power=80% 34.95448 TSD_EQUIVALENCE
## 11 9 Power=80% 26.76202 TSD_EQUIVALENCE
## 12 10 Power=80% 21.14530 TSD_EQUIVALENCE
## 13 11 Power=80% 17.12769 TSD_EQUIVALENCE
## 14 12 Power=80% 14.15512 TSD_EQUIVALENCE
## 15 6 Power=90% 86.57739 TSD_EQUIVALENCE
## 16 7 Power=90% 60.12319 TSD_EQUIVALENCE
## 17 8 Power=90% 44.17214 TSD_EQUIVALENCE
## 18 9 Power=90% 33.81929 TSD_EQUIVALENCE
## 19 10 Power=90% 26.72142 TSD_EQUIVALENCE
## 20 11 Power=90% 21.64435 TSD_EQUIVALENCE
## 21 12 Power=90% 17.88789 TSD_EQUIVALENCE
##################################
# Plotting the sample sizes and power curves
##################################
(TwoSampleDesign.Equivalence.PowerAnalysis <- ggplot(TwoSampleDesign.Equivalence.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Two-Sample Design : Test for Equivalence"))
##################################
# Consolidating the power analyses charts
##################################
TSD_EQUALITY.PowerAnalysis <- ggplot(TwoSampleDesign.Equality.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
TSD_NONINFERIORITY.PowerAnalysis <- ggplot(TwoSampleDesign.NonInferiority.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
TSD_SUPERIORITY.PowerAnalysis <- ggplot(TwoSampleDesign.Superiority.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
TSD_EQUIVALENCE.PowerAnalysis <- ggplot(TwoSampleDesign.Equivalence.Reshaped,
aes(x=Mean.LDL.Percent.Change,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Mean LDL Percent Change",
limits=c(6,12),
breaks=seq(6,12,by=1)) +
scale_y_continuous(name="Sample Size Per Group",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
TSD_PowerAnalysis <- ggarrange(TSD_EQUALITY.PowerAnalysis,
TSD_NONINFERIORITY.PowerAnalysis,
TSD_SUPERIORITY.PowerAnalysis,
TSD_EQUIVALENCE.PowerAnalysis,ncol=2, nrow=2)
annotate_figure(TSD_PowerAnalysis,
top = text_grob("Two-Sample Design Power Analysis by Statistical Hypothesis",
color = "black",
face = "bold",
size = 14))
##################################
# Defining a fixed hypothesized value for mu_A
##################################
mu_AF=9.0
##################################
# Defining a range of hypothesized values for mu_B
##################################
mu_BR=seq(11.0,13.0,0.5)
##################################
# Defining a fixed hypothesized value for mu_B
# using the highest value from the range
##################################
mu_BF=13.0
##################################
# Defining a range of hypothesized values for mu_C
##################################
mu_CR=seq(15.0,17.0,0.5)
##################################
# Defining a fixed hypothesized value for the standard deviation
##################################
sd=3.5
##################################
# Defining a fixed hypothesized value for the Type I error
##################################
alpha=0.05
##################################
# Defining a range of possible hypothesized values for the Type II error
##################################
beta=c(0.30,0.20,0.10)
##################################
# Defining the number of treatments
##################################
tau=3
##################################
# Conducting a pairwise comparison
# between Treatments A and B
##################################
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_1))/(mu_BR-mu_AF))^2)
## [1] 23.624302 15.119553 10.499690 7.714058 5.906075
ceiling(n_Beta30)
## [1] 24 16 11 8 6
z_Beta30=(mu_BR-mu_AF)/(sd*sqrt(2/n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta30-qnorm(1-alpha/(2/tau))))
## [1] 0.7003327 0.7003327 0.7003327 0.7003327 0.7003327
beta_2=beta[2]
(n_Beta20=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_2))/(mu_BR-mu_AF))^2)
## [1] 31.872403 20.398338 14.165513 10.407315 7.968101
ceiling(n_Beta20)
## [1] 32 21 15 11 8
z_Beta20=(mu_BR-mu_AF)/(sd*sqrt(2/n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta20-qnorm(1-alpha/(2/tau))))
## [1] 0.8000993 0.8000993 0.8000993 0.8000993 0.8000993
beta_3=beta[3]
(n_Beta10=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_3))/(mu_BR-mu_AF))^2)
## [1] 45.35129 29.02483 20.15613 14.80859 11.33782
ceiling(n_Beta10)
## [1] 46 30 21 15 12
z_Beta10=(mu_BR-mu_AF)/(sd*sqrt(2/n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta10-qnorm(1-alpha/(2/tau))))
## [1] 0.9000159 0.9000159 0.9000159 0.9000159 0.9000159
MultipleSampleDesign.AB.Equality <- as.data.frame(cbind(mu_BR,n_Beta30,n_Beta20,n_Beta10))
names(MultipleSampleDesign.AB.Equality) <- c("Clinical.Response",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
MultipleSampleDesign.AB.Equality.Reshaped <- gather(MultipleSampleDesign.AB.Equality,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
MultipleSampleDesign.AB.Equality.Reshaped$Label <- rep("MSDOWAP_AB_EQUALITY",nrow(MultipleSampleDesign.AB.Equality.Reshaped))
MultipleSampleDesign.AB.Equality.Reshaped
## Clinical.Response Power Sample.Size Label
## 1 11.0 Power=70% 23.624302 MSDOWAP_AB_EQUALITY
## 2 11.5 Power=70% 15.119553 MSDOWAP_AB_EQUALITY
## 3 12.0 Power=70% 10.499690 MSDOWAP_AB_EQUALITY
## 4 12.5 Power=70% 7.714058 MSDOWAP_AB_EQUALITY
## 5 13.0 Power=70% 5.906075 MSDOWAP_AB_EQUALITY
## 6 11.0 Power=80% 31.872403 MSDOWAP_AB_EQUALITY
## 7 11.5 Power=80% 20.398338 MSDOWAP_AB_EQUALITY
## 8 12.0 Power=80% 14.165513 MSDOWAP_AB_EQUALITY
## 9 12.5 Power=80% 10.407315 MSDOWAP_AB_EQUALITY
## 10 13.0 Power=80% 7.968101 MSDOWAP_AB_EQUALITY
## 11 11.0 Power=90% 45.351294 MSDOWAP_AB_EQUALITY
## 12 11.5 Power=90% 29.024828 MSDOWAP_AB_EQUALITY
## 13 12.0 Power=90% 20.156131 MSDOWAP_AB_EQUALITY
## 14 12.5 Power=90% 14.808586 MSDOWAP_AB_EQUALITY
## 15 13.0 Power=90% 11.337823 MSDOWAP_AB_EQUALITY
##################################
# Plotting the sample sizes and power curves
##################################
(MultipleSampleDesign.AB.Equality.PowerAnalysis <- ggplot(MultipleSampleDesign.AB.Equality.Reshaped,
aes(x=Clinical.Response,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Clinical Response",
limits=c(11,13),
breaks=seq(11,13,by=0.5)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Multiple-Sample Design : Test for Equality Between A Versus B"))
##################################
# Conducting a pairwise comparison
# between Treatments A and C
##################################
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_1))/(mu_CR-mu_AF))^2)
## [1] 2.624922 2.236620 1.928514 1.679950 1.476519
ceiling(n_Beta30)
## [1] 3 3 2 2 2
z_Beta30=(mu_CR-mu_AF)/(sd*sqrt(2/n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta30-qnorm(1-alpha/(2/tau))))
## [1] 0.7003327 0.7003327 0.7003327 0.7003327 0.7003327
beta_2=beta[2]
(n_Beta20=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_2))/(mu_CR-mu_AF))^2)
## [1] 3.541378 3.017506 2.601829 2.266482 1.992025
ceiling(n_Beta20)
## [1] 4 4 3 3 2
z_Beta20=(mu_CR-mu_AF)/(sd*sqrt(2/n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta20-qnorm(1-alpha/(2/tau))))
## [1] 0.8000993 0.8000993 0.8000993 0.8000993 0.8000993
beta_3=beta[3]
(n_Beta10=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_3))/(mu_CR-mu_AF))^2)
## [1] 5.039033 4.293614 3.702146 3.224981 2.834456
ceiling(n_Beta10)
## [1] 6 5 4 4 3
z_Beta10=(mu_CR-mu_AF)/(sd*sqrt(2/n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta10-qnorm(1-alpha/(2/tau))))
## [1] 0.9000159 0.9000159 0.9000159 0.9000159 0.9000159
MultipleSampleDesign.AC.Equality <- as.data.frame(cbind(mu_CR,n_Beta30,n_Beta20,n_Beta10))
names(MultipleSampleDesign.AC.Equality) <- c("Clinical.Response",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
MultipleSampleDesign.AC.Equality.Reshaped <- gather(MultipleSampleDesign.AC.Equality,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
MultipleSampleDesign.AC.Equality.Reshaped$Label <- rep("MSDOWAP_AC_EQUALITY",nrow(MultipleSampleDesign.AC.Equality.Reshaped))
MultipleSampleDesign.AC.Equality.Reshaped
## Clinical.Response Power Sample.Size Label
## 1 15.0 Power=70% 2.624922 MSDOWAP_AC_EQUALITY
## 2 15.5 Power=70% 2.236620 MSDOWAP_AC_EQUALITY
## 3 16.0 Power=70% 1.928514 MSDOWAP_AC_EQUALITY
## 4 16.5 Power=70% 1.679950 MSDOWAP_AC_EQUALITY
## 5 17.0 Power=70% 1.476519 MSDOWAP_AC_EQUALITY
## 6 15.0 Power=80% 3.541378 MSDOWAP_AC_EQUALITY
## 7 15.5 Power=80% 3.017506 MSDOWAP_AC_EQUALITY
## 8 16.0 Power=80% 2.601829 MSDOWAP_AC_EQUALITY
## 9 16.5 Power=80% 2.266482 MSDOWAP_AC_EQUALITY
## 10 17.0 Power=80% 1.992025 MSDOWAP_AC_EQUALITY
## 11 15.0 Power=90% 5.039033 MSDOWAP_AC_EQUALITY
## 12 15.5 Power=90% 4.293614 MSDOWAP_AC_EQUALITY
## 13 16.0 Power=90% 3.702146 MSDOWAP_AC_EQUALITY
## 14 16.5 Power=90% 3.224981 MSDOWAP_AC_EQUALITY
## 15 17.0 Power=90% 2.834456 MSDOWAP_AC_EQUALITY
##################################
# Plotting the sample sizes and power curves
##################################
(MultipleSampleDesign.AC.Equality.PowerAnalysis <- ggplot(MultipleSampleDesign.AC.Equality.Reshaped,
aes(x=Clinical.Response,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Clinical Response",
limits=c(15,17),
breaks=seq(15,17,by=0.5)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Multiple-Sample Design : Test for Equality Between A Versus C"))
##################################
# Conducting a pairwise comparison
# between Treatments B and C
##################################
##################################
# Computing the range of sample sizes based on different levels of Type II errors
##################################
beta_1=beta[1]
(n_Beta30=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_1))/(mu_CR-mu_BF))^2)
## [1] 23.624302 15.119553 10.499690 7.714058 5.906075
ceiling(n_Beta30)
## [1] 24 16 11 8 6
z_Beta30=(mu_CR-mu_BF)/(sd*sqrt(2/n_Beta30))
(Power_Beta30=pnorm(z_Beta30-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta30-qnorm(1-alpha/(2/tau))))
## [1] 0.7003327 0.7003327 0.7003327 0.7003327 0.7003327
beta_2=beta[2]
(n_Beta20=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_2))/(mu_CR-mu_BF))^2)
## [1] 31.872403 20.398338 14.165513 10.407315 7.968101
ceiling(n_Beta20)
## [1] 32 21 15 11 8
z_Beta20=(mu_CR-mu_BF)/(sd*sqrt(2/n_Beta20))
(Power_Beta20=pnorm(z_Beta20-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta20-qnorm(1-alpha/(2/tau))))
## [1] 0.8000993 0.8000993 0.8000993 0.8000993 0.8000993
beta_3=beta[3]
(n_Beta10=2*(sd*(qnorm(1-alpha/(2/tau))+qnorm(1-beta_3))/(mu_CR-mu_BF))^2)
## [1] 45.35129 29.02483 20.15613 14.80859 11.33782
ceiling(n_Beta10)
## [1] 46 30 21 15 12
z_Beta10=(mu_CR-mu_BF)/(sd*sqrt(2/n_Beta10))
(Power_Beta10=pnorm(z_Beta10-qnorm(1-alpha/(2/tau)))+pnorm(-z_Beta10-qnorm(1-alpha/(2/tau))))
## [1] 0.9000159 0.9000159 0.9000159 0.9000159 0.9000159
MultipleSampleDesign.BC.Equality <- as.data.frame(cbind(mu_CR,n_Beta30,n_Beta20,n_Beta10))
names(MultipleSampleDesign.BC.Equality) <- c("Clinical.Response",
"Power=70%",
"Power=80%",
"Power=90%")
##################################
# Restructuring the data
##################################
MultipleSampleDesign.BC.Equality.Reshaped <- gather(MultipleSampleDesign.BC.Equality,
"Power=70%","Power=80%","Power=90%",
key="Power",
value="Sample.Size")
MultipleSampleDesign.BC.Equality.Reshaped$Label <- rep("MSDOWAP_BC_EQUALITY",nrow(MultipleSampleDesign.BC.Equality.Reshaped))
MultipleSampleDesign.BC.Equality.Reshaped
## Clinical.Response Power Sample.Size Label
## 1 15.0 Power=70% 23.624302 MSDOWAP_BC_EQUALITY
## 2 15.5 Power=70% 15.119553 MSDOWAP_BC_EQUALITY
## 3 16.0 Power=70% 10.499690 MSDOWAP_BC_EQUALITY
## 4 16.5 Power=70% 7.714058 MSDOWAP_BC_EQUALITY
## 5 17.0 Power=70% 5.906075 MSDOWAP_BC_EQUALITY
## 6 15.0 Power=80% 31.872403 MSDOWAP_BC_EQUALITY
## 7 15.5 Power=80% 20.398338 MSDOWAP_BC_EQUALITY
## 8 16.0 Power=80% 14.165513 MSDOWAP_BC_EQUALITY
## 9 16.5 Power=80% 10.407315 MSDOWAP_BC_EQUALITY
## 10 17.0 Power=80% 7.968101 MSDOWAP_BC_EQUALITY
## 11 15.0 Power=90% 45.351294 MSDOWAP_BC_EQUALITY
## 12 15.5 Power=90% 29.024828 MSDOWAP_BC_EQUALITY
## 13 16.0 Power=90% 20.156131 MSDOWAP_BC_EQUALITY
## 14 16.5 Power=90% 14.808586 MSDOWAP_BC_EQUALITY
## 15 17.0 Power=90% 11.337823 MSDOWAP_BC_EQUALITY
##################################
# Plotting the sample sizes and power curves
##################################
(MultipleSampleDesign.BC.Equality.PowerAnalysis <- ggplot(MultipleSampleDesign.BC.Equality.Reshaped,
aes(x=Clinical.Response,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Clinical Response",
limits=c(15,17),
breaks=seq(15,17,by=0.5)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50)) +
ggtitle("Multiple-Sample Design : Test for Equality Between B Versus C"))
##################################
# Consolidating the power analyses charts
##################################
MSDOWAP.AB.Equality.PowerAnalysis <- ggplot(MultipleSampleDesign.AB.Equality.Reshaped,
aes(x=Clinical.Response,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Clinical Response",
limits=c(11,13),
breaks=seq(11,13,by=0.5)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
MSDOWAP.AC.Equality.PowerAnalysis <- ggplot(MultipleSampleDesign.AC.Equality.Reshaped,
aes(x=Clinical.Response,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Clinical Response",
limits=c(15,17),
breaks=seq(15,17,by=0.5)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
MSDOWAP.BC.Equality.PowerAnalysis <- ggplot(MultipleSampleDesign.BC.Equality.Reshaped,
aes(x=Clinical.Response,
y=Sample.Size,
color=Power)) +
geom_line(size=1)+
geom_point(size=4)+
theme_bw() +
facet_grid(. ~ Label) +
scale_color_brewer(palette="Paired") +
scale_x_continuous(name="Hypothesized Clinical Response",
limits=c(15,17),
breaks=seq(15,17,by=0.5)) +
scale_y_continuous(name="Treatment Group Sample Size",
limits=c(0,100),
breaks=seq(0,100,by=10)) +
theme(axis.title.x=element_text(color="black",face="bold",size=12),
legend.position="top",
axis.title.y=element_text(color="black",face="bold",size=12),
plot.title=element_text(color="black",size=14,face="bold",hjust=0.50))
MSDOWAP_PowerAnalysis <- ggarrange(MSDOWAP.AB.Equality.PowerAnalysis,
MSDOWAP.AC.Equality.PowerAnalysis,
MSDOWAP.BC.Equality.PowerAnalysis,ncol=2, nrow=2)
annotate_figure(MSDOWAP_PowerAnalysis,
top = text_grob("Multiple-Sample Design One-Way ANOVA Power Analysis by Pairwise Comparison",
color = "black",
face = "bold",
size = 14))