/* Note from the original forum post, kept as a comment so the file parses:
   "I didn't use macro and Yes as you said once I added (OPTIONS NONOTES;
   and OPTIONS NOTES;) I started to have so many warning problems." */
proc iml;

/* ---------------- Simulation settings ---------------- */
seed   = 12345;              /* RNG seed (consumed by the first UNIFORM call) */
k      = 1000;               /* number of Monte-Carlo replications            */
B      = 5000;               /* bootstrap resamples per replication           */
alpha  = 2;                  /* true Kumaraswamy shape parameter alpha        */
lambda = 1.5;                /* true Kumaraswamy shape parameter lambda       */
a      = 0.05;               /* significance level                            */
scheme = 12;                 /* censoring-scheme label (for printing only)    */
n      = 20;                 /* complete-sample size                          */
m      = 16;                 /* number of observed failures                   */
R      = J(m,1,0);           /* progressive censoring scheme R[1..m]          */
R[m]   = n - m;              /* all n-m removals at the last failure          */
con    = {0.1 0.1, 2.5 2};   /* optimizer bounds for (alpha, lambda)          */
x0_mle = {0.7 2.1};          /* starting values for the MLE search            */
x0_mps = {1.2 1};            /* starting values for the MPS search            */

/* Data(alpha, lambda):
   Simulate one adaptive progressive first-failure Type-II censored sample of
   size m from the Kumaraswamy(alpha, lambda) distribution on (0,1).
   Uniform progressive order statistics are built via the product construction
   on V[i] = W[i]**(1/S_i), then transformed through the inverse Kumaraswamy
   CDF.  Observations past the adaptive threshold T are regenerated from the
   distribution truncated at T.
   Reads globals: seed, m, n, R.  Returns the sorted m-vector X. */
start Data(alpha, lambda) global(seed, m, n, R);
   W = J(m,1,0);
   V = J(m,1,0);
   U = J(m,1,0);
   do i = 1 to m;
      W[i] = Uniform(seed);            /* stream continues across calls */
   end;
   S = 1 + R[m];
   V[1] = W[1]**(1/S);
   do i = 2 to m;
      S = S + (1 + R[m-i+1]);
      V[i] = W[i]**(1/S);
   end;
   do i = 1 to m;
      U[i] = 1 - prod( V[ m:(m-i+1) ] );  /* progressive Type-II U(0,1) sample */
   end;
   call sort(U);

   /* Invert the Kumaraswamy CDF; X_p inherits the ordering of U */
   X_p = ( 1 - (1-U)##(1/alpha) )##(1/lambda);
   call sort(X_p);

   /* Adaptive threshold T: the (4m/5)-th order statistic.                  */
   /* NOTE(review): 4*m/5 may be non-integer; IML rounds the subscript --   */
   /* confirm this picks the intended order statistic.                      */
   T = X_p[4*m/5];

   /* q = number of failures strictly before the threshold T */
   do idx = 1 to m-1;
      if (X_p[idx] < T) & (X_p[idx+1] >= T) then q = idx;
   end;

   if X_p[m] > T then do;
      /* Keep the first q observations; regenerate the remaining m-q from    */
      /* the Kumaraswamy distribution left-truncated at T (adaptive step).   */
      X_1_q = X_p[1:q];
      W1 = J(m-q,1,0);
      do i = 1 to m-q;
         W1[i] = Uniform(seed);  /* was RANDSEED+RANDGEN: re-seeding with the
                                    same seed on every call regenerated the
                                    SAME uniforms for every bootstrap sample */
      end;
      call sort(W1);
      YY = ( 1 - (1-W1)##(1/alpha)#(1- T**lambda) )##(1/lambda);
      X = X_1_q // YY;
   end;
   else if X_p[m] <= T then do;
      q = m;
      X = X_p;
   end;
   return (X);
finish;

*******************************************************************************;
*** MLE for the Kumaraswamy using Type II Adaptive progressive censoring ***** ;
*******************************************************************************;

/* MLE_func(y):
   Log-likelihood of (alpha, lambda) = (y[1], y[2]) for the adaptive
   progressive Type-II censored Kumaraswamy sample.
   Reads globals: n, m, X (data vector), R (censoring scheme), q (number of
   failures before the adaptive threshold).
   Returns the scalar log-likelihood, maximized by NLPNRA below. */
start MLE_func(y) global(n, m, X, R, q);
   alpha  = y[1];
   lambda = y[2];
   Rq = R[1:q];
   Sum_log   = log(X);                          /* log x_i terms              */
   Sum_log_1 = log(1 - X##lambda);              /* log(1 - x_i^lambda) terms  */
   Sum_log_q = Rq # log(1 - X[1:q]##lambda);    /* progressively censored part */
   func = m*log(alpha*lambda)
        + (lambda-1) * Sum_log[+]
        + (alpha-1)  * Sum_log_1[+]
        + alpha      * Sum_log_q[+]
        + alpha * (n - m - Rq[+]) * log(1 - X[m]**lambda);
   return (func);
finish;

***********************************************************************************************;
*** Maximum Product Spacing for Kumaraswamy using Type II Adaptive progressive censoring ***** ;
***********************************************************************************************;
/* MPS_func(y):
   Log product-of-spacings objective for (alpha, lambda) = (y[1], y[2]).
   Reads the same globals as MLE_func; maximized by NLPQN below. */
start MPS_func(y) global(n, m, X, R, q);
   alpha  = y[1];
   lambda = y[2];
   Rq = R[1:q];
   /* log-spacings between consecutive survival-function values */
   Sum1 = 0;
   do i = 2 to m;
      Sum1 = Sum1 + log( (1 - X[i-1]**lambda)**alpha - (1 - X[i]**lambda)**alpha );
   end;
   Sum2 = alpha # Rq # log(1 - X[1:q]##lambda);
   /* 1.0001 guards against log(0) at the boundary spacings */
   func = log( 1.0001 - (1 - X[1]**lambda)**alpha )
        + alpha * log( 1.0001 - X[m]**lambda )
        + Sum1[+] + Sum2[+]
        + alpha * (n - m - Rq[+]) * log(1 - X[m]**lambda);
   return (func);
finish;

********************************************************;
/* Per-replication result stores.  The original allocated these with an     */
/* undefined symbol N; k is the declared Monte-Carlo replication count.     */
/* Suffix 1 = MLE-based bootstrap, suffix 2 = MPS-based bootstrap.          */
BootEST_alpha1  = J(k,1,0);   BootEST_alpha2  = J(k,1,0);
SE_alpha1       = J(k,1,0);   SE_alpha2       = J(k,1,0);
Length_alpha1   = J(k,1,0);   Length_alpha2   = J(k,1,0);
BootEST_Lambda1 = J(k,1,0);   BootEST_Lambda2 = J(k,1,0);
SE_Lambda1      = J(k,1,0);   SE_Lambda2      = J(k,1,0);
Length_Lambda1  = J(k,1,0);   Length_Lambda2  = J(k,1,0);
**********************************************************;

/* OPTIONS is a global SAS statement; the original "%OPTIONS NONOTES;"      */
/* invoked a non-existent macro, which caused the reported warning flood.   */
OPTIONS NONOTES;

/* NOTE(review): the loop index J shares its name with the J() function;    */
/* IML distinguishes J(...) from J[...], but renaming it would be safer.    */
do J = 1 to k;    /* Monte-Carlo replication loop (was "to N", undefined) */
   X = J(m,1,0);
********************************************;
*** Bootstrap Steps ***;
********************************************;
Step1: ************;
/* Fit MLE and MPS on a fresh sample drawn from the true parameters */
tc  = {10000 14000};   /* optimizer iteration / function-call limits */
opt = {1};             /* maximize */
X = Data(alpha, lambda);
T = X[4*m/5];          /* adaptive threshold for this replication */
do idx = 1 to m-1;
   if (X[idx] < T) & (X[idx+1] >= T) then q = idx;
end;
if X[m] <= T then q = m;

************************* Find MLE ************************;
call nlpnra(rc, MLE_ret, "MLE_func", x0_mle, opt, con, tc);
alpha_mle  = MLE_ret[1];
lambda_mle = MLE_ret[2];

************************* Find MPS ************************;
call nlpqn(rc, MPS_ret, "MPS_func", x0_mps, opt, con, tc);
alpha_MPS  = MPS_ret[1];
lambda_MPS = MPS_ret[2];

Step2: ************;
/* Parametric bootstrap: B resamples from each fitted model */
B_alpha1 = J(B,1,0);   B_Lambda1 = J(B,1,0);
B_alpha2 = J(B,1,0);   B_Lambda2 = J(B,1,0);
do i = 1 to B;
   ************************* Find MLE ************************;
   X = Data(alpha_mle, lambda_mle);
   do idx = 1 to m-1;
      if (X[idx] < T) & (X[idx+1] >= T) then q = idx;
   end;
   if X[m] <= T then q = m;   /* was "X <= T": elementwise test on a vector */
   x0_MLE = MLE_ret[1] || MLE_ret[2];   /* warm-start at previous estimate */
   call nlpnra(rc, MLE_ret, "MLE_func", x0_MLE, opt, con, tc);
   B_alpha1[i]  = MLE_ret[1];
   B_lambda1[i] = MLE_ret[2];

   ************************* Find MPS ************************;
   X_star = Data(alpha_MPS, lambda_MPS);
   x0_MPS = MPS_ret[1] || MPS_ret[2];
   do idx = 1 to m-1;
      if (X_star[idx] < T) & (X_star[idx+1] >= T) then q = idx;
   end;
   if X_star[m] <= T then q = m;
   X = X_star;   /* MPS_func reads the global X; without this assignment the
                    MPS fit silently re-used the MLE bootstrap sample */
   call nlpqn(rc, MPS_ret, "MPS_func", x0_MPS, opt, con, tc);
   B_alpha2[i]  = MPS_ret[1];
   B_lambda2[i] = MPS_ret[2];
end;

*************Bootstrap***********;
********** alpha_MLE using Bootstrap ******;
BootEST_alpha1[j] = mean(B_alpha1);
SE_alpha1[j]      = std(B_alpha1);
call qntl(CI_alpha1, B_alpha1, a/2 || 1-a/2);   /* percentile interval */
Length_alpha1[j]  = CI_alpha1[2] - CI_alpha1[1];

********** Lambda_MLE using Bootstrap ******;
BootEST_lambda1[j] = mean(B_lambda1);
SE_lambda1[j]      = std(B_lambda1);
call qntl(CI_lambda1, B_lambda1, a/2 || 1-a/2);
Length_lambda1[j]  = CI_lambda1[2] - CI_lambda1[1];

********** alpha_MPS using Bootstrap ******;
BootEST_alpha2[j] = mean(B_alpha2);
SE_alpha2[j]      = std(B_alpha2);
call qntl(CI_alpha2, B_alpha2, a/2 || 1-a/2);   /* percentile interval */
Length_alpha2[j]  = CI_alpha2[2] - CI_alpha2[1];

********** Lambda_MPS using Bootstrap ******;
BootEST_lambda2[j] = mean(B_lambda2);
SE_lambda2[j]      = std(B_lambda2);
call qntl(CI_lambda2, B_lambda2, a/2 || 1-a/2);
Length_lambda2[j]  = CI_lambda2[2] - CI_lambda2[1];
**************************************************;
end; * END Of Simulations ;

/* Restore notes; the original "%OPTIONS NOTES;" called a non-existent     */
/* macro instead of the global OPTIONS statement.                          */
OPTIONS NOTES;

/* Average percentile-interval lengths over replications ([:] = mean) */
P_Length_alpha_MLE  = Length_alpha1[:];
P_Length_Lambda_MLE = Length_Lambda1[:];
P_Length_alpha_MPS  = Length_alpha2[:];
P_Length_Lambda_MPS = Length_Lambda2[:];
print P_Length_alpha_MLE  P_Length_alpha_MPS;
print P_Length_Lambda_MLE P_Length_Lambda_MPS;

********** Results for MLE *****************;
B_Est_alpha_MLE     = round(BootEst_alpha1[:], 0.0001);
B_SE_alpha_MLE      = SE_alpha1[:];
B_length_alpha_MLE  = round(Length_alpha1[:], 0.0001);
B_Est_lambda_MLE    = round(BootEst_lambda1[:], 0.0001);
B_SE_lambda_MLE     = SE_lambda1[:];
B_length_lambda_MLE = round(Length_lambda1[:], 0.0001);
/* Normal-approximation bootstrap length: 2 * z_{0.975} * SE / sqrt(m) */
Length_normal_alpha_MLE  = 2*quantile('NORMAL', .975)*B_SE_alpha_MLE /sqrt(m); * this method, which is called normal bootstrap, works better;
Length_normal_lambda_MLE = 2*quantile('NORMAL', .975)*B_SE_lambda_MLE /sqrt(m);

********** Results for MPS *****************;
B_Est_alpha_MPS     = round(BootEst_alpha2[:], 0.0001);
B_SE_alpha_MPS      = SE_alpha2[:];
B_length_alpha_MPS  = round(Length_alpha2[:], 0.0001);
B_Est_lambda_MPS    = round(BootEst_lambda2[:], 0.0001);
B_SE_lambda_MPS     = SE_lambda2[:];
B_length_lambda_MPS = round(Length_lambda2[:], 0.0001);
Length_normal_alpha_MPS  = 2*quantile('NORMAL', .975)*B_SE_alpha_MPS /sqrt(m); * this method works better;
Length_normal_lambda_MPS = 2*quantile('NORMAL', .975)*B_SE_lambda_MPS /sqrt(m);

print n m Scheme T q alpha Lambda k B seed;
print Length_normal_alpha_MLE  Length_normal_alpha_MPS;
print Length_normal_lambda_MLE Length_normal_lambda_MPS;
Quit;
... View more