# Code to illustrate different estimators for p = P(X = 1) from a sample of
# n binomial(k, theta) values.
#
# Side effects: defines est1..est4 (per-replication estimates), p (the true
# value of P(X = 1)), and mse1..mse4 (estimated mean squared errors).

# Number of Monte Carlo replications.
I <- 100000

# Parameters: sample size n, binomial size k, success probability theta.
n <- 1000
k <- 100
theta <- 0.6

# Preallocate as doubles.  (The original `1:I` integer vectors would be
# coerced and copied in full on the first non-integer assignment.)
est1 <- numeric(I)
est2 <- numeric(I)
est3 <- numeric(I)
est4 <- numeric(I)

for (i in seq_len(I)) {
  mysample <- rbinom(n, k, theta)
  # Sufficient statistic; renamed from `t` to avoid masking base::t().
  tot <- sum(mysample)

  # Silly but unbiased estimator based on the first observation only.
  est1[i] <- sum(mysample[1] == 1)

  # Rao-Blackwell example from book.  Computed on the log scale with
  # lchoose(): for these parameters choose(n*k, tot) = choose(100000, ~60000)
  # overflows to Inf, so the direct ratio
  # k*choose((n-1)*k, tot-1)/choose(n*k, tot) evaluates to Inf/Inf = NaN.
  est2[i] <- k * exp(lchoose((n - 1) * k, tot - 1) - lchoose(n * k, tot))

  # MLE: plug-in estimate k*th*(1-th)^(k-1) with th = tot/(n*k),
  # noting that tot/n = k*th.
  est3[i] <- (tot / n) * (1 - tot / (n * k))^(k - 1)

  # Empirical estimate (proportion of cases where X = 1).
  est4[i] <- mean(mysample == 1)
}

# True value: p = P(X = 1) = k * theta * (1 - theta)^(k - 1).
p <- k * theta * (1 - theta)^(k - 1)

# Estimated mean squared errors for the four estimators.
mse1 <- mean((est1 - p)^2)
mse2 <- mean((est2 - p)^2)
mse3 <- mean((est3 - p)^2)
mse4 <- mean((est4 - p)^2)

# Some interesting things to try (for each of est1, est2, est3, est4):
#   hist(est1, prob = TRUE)
#   plot(est2, est3)
#   # add the line y = x to the scatterplot to compare est2 versus est3:
#   lines(c(0, max(est2, est3)), c(0, max(est2, est3)))
#   cor(est2, est3)