#COLLATION AGU05 PRESENTATION
source("c:/climate/scripts/treeconfig.functions.txt")

#read a year-indexed table (year in column 1) into a ts object
tsread<-function(g) {tsread<-ts(g[,2:ncol(g)],start=g[1,1],end=g[nrow(g),1]); tsread}

proxy<-array(NA,dim=c(1990,9)) #rows are years 1 to 1990
proxy<-data.frame(proxy)
names(proxy)<-c("CRU","J98","MBH99","MJ03","CL00","BJ00","BJ01","Esp02","Mob05")
library(waveslim)

#LOAD INDIVIDUAL COMPONENTS
#CENTER each series on its 1902-1980 (MBH calibration) mean

#1. CRU
url<-"ftp://ftp.cru.uea.ac.uk/data/tavenh2v.dat" #there is also version tavenh2.dat
h<-scan(url)
N<-length(h)
h<-array(h,dim=c(27,N/27))
h<-t(h)
CRU<-h[,14] #1856 - 2005
index<-(1902-1855):(1980-1855)
sd.cru<-sd(CRU[index]);sd.cru # 0.18595
mean(CRU[(1961-1855):(1990-1855)]) # 0.01486667
mean.cru<-mean(CRU[index]);mean.cru # -0.1284557
CRU<-CRU-mean.cru
proxy$CRU[1856:1990]<-CRU[1:(1990-1855)]

#2. JONES 98
#adjusted to 1902-1980 mean from 1961-1990 mean; 1961-1990 anomaly; from 1000 to 1991
#Data Files: Jonesdata.txt and Jonesdata.xls contain the original series in
#normalised units as well as anomalies in degrees C vs 1961-90 mean.
#The 1961-90 anomalies are related to the normalised series as follows:
# nh*0.521 - 0.1134
# sh*0.608 - 0.2394
#Columns: YearAD N.Hem.Norm. N.Hem.Anom. N.Hem#Prox S.Hem.Norm. S.Hem.Anom. S.Hem#Prox
url<-"ftp://ftp.ngdc.noaa.gov/paleo/contributions_by_author/jones1998/jonesdata.txt"
h<-read.table(url,skip=48,sep="\t")
h<-tsread(h)
tsp(h) #1000 1991
apply(h[(1961-tsp(h)[1]+1):(1990-tsp(h)[1]+1),],2,mean)
# 0.11366667 -0.05417667  7.93333333  0.47066667  0.04676667  5.96666667
apply(h[(1902-tsp(h)[1]+1):(1980-tsp(h)[1]+1),],2,mean)
# 0.0354 -0.0949  9.9114  0.1635 -0.1400  7.0000
Jones98<-h[,2]
Jones98<-Jones98-mean(Jones98[903:981]) #903:981 is 1902-1980 for a series starting in 1000
proxy$J98[1000:1990]<-Jones98[1:991]

#RECALIBRATED JONES IN JONES ET AL 2001
#fn 16: The target series are April to September land-only values north of 20°N
#for Jones et al. (9) over 1881-1960
#ftp://ftp.ngdc.noaa.gov/paleo/contributions_by_author/jones2001/jones2001_fig2.txt
#smoothed version only

#3. LOAD MBH99
#this is already on the 1902-1980 mean
url<-"ftp://ftp.ngdc.noaa.gov/paleo/contributions_by_author/mann1999/recons/nhem-recon.dat"
g<-read.table(url)
MBH99<-tsread(g)
tsp(MBH99) #1000 1980
proxy$MBH99[1000:1980]<-MBH99[1:981]

#4. LOAD BRIFFA00 and BRIFFA01
#in 1960-1990 mean
url<-"ftp://ftp.ngdc.noaa.gov/paleo/treering/reconstructions/n_hem_temp/briffa2001jgr3.txt"
#readLines(url)[1:50]
g<-read.table(url,skip=24,fill=TRUE)
g<-tsread(g)
temp<-(g< -900) #flag -999-style missing values
g[temp]<-NA
Briffa<-g
tsp(Briffa) #1000 1999
dimnames(Briffa)[[2]]<-c("Jones98","MBH99","Briffa01","Briffa00","Overpeck97","Crowley00","CRU99")
#see Briffa et al 01 compilation
apply(Briffa[961:990,],2,mean,na.rm=T)
#  Jones98    MBH99 Briffa01 Briffa00 Overpeck97 Crowley00   CRU99
#  -0.0683  -0.0545   0.0760  -0.1182    -0.0591   -0.0656  0.0100
Briffa00<-Briffa[,"Briffa00"] #this is the long RW series
Briffa00<-Briffa00-mean(Briffa00[903:981])
proxy$BJ00[1000:1990]<-Briffa00[1:991]
Briffa01<-Briffa[,"Briffa01"] #this is the long MXD series
Briffa01<-Briffa01-mean(Briffa01[903:981],na.rm=T)
proxy$BJ01[1000:1990]<-Briffa01[1:991]
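#The same two steps recur for each series below: convert calendar years to row
#offsets (year - start.year + 1) and recenter on the 1902-1980 calibration mean.
#A helper making that arithmetic explicit (hypothetical; the script does it inline):
center.cal<-function(x,start.year,cal=1902:1980) {
  idx<-cal-start.year+1 #positions of the calibration years within x
  x-mean(x[idx],na.rm=TRUE) }
#e.g. CRU starts in 1856, so center.cal(CRU,1856) reproduces the CRU-mean.cru step above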
url<-"ftp://ftp.ngdc.noaa.gov/paleo/treering/reconstructions/n_hem_temp/esper2002_nhem_temp.txt" h<-read.table(url,skip=53,fill=TRUE) h<-tsread(h) Esper02<-h tsp(Esper02) # 831 1992 load("c:/climate/data/esper05/annual.tab") #this is collected in esper05/seasonal.instrumental.txt test<-annual[,"CRU.NH.AS"] mean.obs<-mean(test[(1902-1850):(1980-1850)]) sd.obs<-sd(test[(1902-1850):(1980-1850)]) index<- (1902-830):(1980-830) sd.esper<-sd(Esper02[index]);sd.esper mean.esper<-mean(Esper02[index]);mean.esper Esper02<- mean.obs+ (Esper02-mean.esper)* (sd.obs/sd.esper) #rescaled to sd(Esper02[index]) # 0.189 proxy$Esp02[831:1990]<-Esper02[(831-830):(1990-830)] c(sd (proxy$Esp02[1902:1980]),sd.obs) #6. MOBERG #SI at http://www.nature.com/nature/journal/v433/n7026/suppinfo/nature03265.html url<-"http://www.nature.com/nature/journal/v433/n7026/extref/nature03265-s6.doc" recon<-read.table(url,skip=19,nrow=1998-19) #1979 9 #from 1 to 1979 name<-c("year","recon","lowf","lowf.lb","lowf.ub","ci.lb","ci.ub","ci2.lb","ci2.ub") dimnames(recon)[[2]]<-name range(recon[,"year"]) # 1 1979 x<-recon[1:1979,"recon"] x<-x-mean(x[1902:1979]) proxy$Mob05[1:1979]<-x rm(recon) #7. MANN JONES 03 url<-"ftp://ftp.ncdc.noaa.gov/pub/data/paleo/contributions_by_author/mann2003b/mann2003b.txt" #All series are anomalies based on 1961-1990 instrumental reference period. #Data are presented as both decadally-resolved series (added to this file 12/2004) #and 40 year smoothed versions of the decadally-resolved reconstrucitons #(as shown in Figure 2 of Mann and Jones). recon<-read.table(url,skip=1858,nrow=(1980-199)) #this is NH from 200 to 1980 # year recon x<-recon[1:(1980-199),2] x<- x-mean(x[(1902-199):(1980-199)]) proxy$MJ03[200:1980]<-x #8. CROWLEY LOWERY SPLICED url<-"ftp://ftp.ngdc.noaa.gov/paleo/gcmoutput/crowley2000/crowley_fig1_data.txt" crowley<-read.table(url,skip=1,sep="\t") dimnames(crowley)[[2]]<-c("Year","CL2.Jns11","Mn.sm11","CL2","+2s.d.adj","-2s.d.adj") url<-"c:/climate/data/crowley/CL0.txt" CL0<-read.table(url,sep="\t",header=TRUE) #this is proxy without splicing CL0<-CL0[,2]-mean(CL0[903:981,2]) proxy$CL00[1000:1987]<-CL0 dim(proxy) # 1024 9 apply(proxy[1902:1980,],2,mean,na.rm=TRUE) # CRU J98 MBH99 MJ03 CL00 #-7.553733e-18 1.317512e-18 -2.305645e-19 -1.229677e-18 -1.317512e-18 # BJ00 BJ01 Esp02 Mob05 #-7.905069e-18 -1.340735e-17 -9.344626e+00 -3.113606e-18 sd0<-apply(proxy,2,sd,na.rm=T);sd0 sd.cal<-apply(proxy[1902:1980,],2,sd,na.rm=T);sd.cal #also see Crowley versions N<-ncol(proxy) proxy$year<- c(1:1990) proxy<-proxy[,c((N+1),1:N)] save(proxy,file="c:/climate/data/agu05/proxy.tab") write.table(proxy,file="c:/climate/data/agu05/proxy.txt",sep="\t",quote=FALSE) ###SPAGHETTI load("c:/climate/data/agu05/proxy.tab") #1990 9 fred<-proxy[800:1990,] dim(proxy) fred<-ts(fred,start=800) f<-function(x) {f<-filter.combine.pad(x,truncated.gauss.weights(21))[,2];f} for (i in 1:9) {fred[,i]<-f(fred[,i])} #col0<-c("blue4","blue1","dodgerblue2","steelblue1","springgreen1","yellow","orange2","darkorange2","red1","red3") col0<-c("black", "blue4","blue1","yellow","dodgerblue2","orange","steelblue1","springgreen1", "red1" ) ylim0<-range(fred,na.rm=T) par(mar=c(3,3,1,1)) plot(800:1990,fred[,1],type="l",col=col0[1],lwd=2,ylim=ylim0,axes=FALSE,xlab="",ylab="") for (i in 2:9) { lines(800:1990,fred[,i],col=col0[i],lwd=2)} id<-dimnames(proxy)[[2]] cbind(id,col0) axis(side=2,font=2,las=1);axis(side=1,font=2);box() #VERSION AT AD1000 fred<-proxy[1000:1990,] fred<-ts(fred,start=800) f<-function(x) 
#VERSION AT AD1000
fred<-proxy[1000:1990,]
fred<-ts(fred,start=1000)
f<-function(x) {f<-filter.combine.pad(x,truncated.gauss.weights(21))[,2];f}
for (i in 1:9) {fred[,i]<-f(fred[,i])}
par(mar=c(3,3,1,1))
plot(1000:1990,fred[,1],type="l",col=col0[1],lwd=2,ylim=ylim0,axes=FALSE,xlab="",ylab="")
for (i in 2:9) {lines(1000:1990,fred[,i],col=col0[i],lwd=2)}
id<-dimnames(proxy)[[2]]
cbind(id,col0)
axis(side=2,font=2,las=1);axis(side=1,font=2);box()

#CALCULATE "LOW FREQ RANGE"
f<-function(x) {f<-range(x,na.rm=TRUE);f<-f[2]-f[1];f}
test<-apply(fred,2,f); test #smoothed series
# CL00  MJ03  BJ00 MBH99   CRU  BJ01   J98 Mob05 Esp02
#0.419 0.456 0.500 0.531 0.568 0.731 0.787 0.991 1.189
test1<-apply(proxy,2,f); test1 #unsmoothed
#  CRU   J98 MBH99  MJ03  CL00  BJ00  BJ01 Esp02 Mob05
#1.044 1.751 0.737 0.572 0.527 0.884 1.332 2.018 1.635
order1<-order(test[2:9])
par(bg="grey80")
barplot(test[2:9][order1],col=col0[2:9][order1],font=2) #abline(h=test[1])
barplot(test1,col=col0,font=2);abline(h=test1[1])

#COMPARE J98 VARIANCE TO CRU JJA
#(JJA: CRU JJA series from the sourced config scripts)
test2<-c(sd(JJA[(1901-1855):(1950-1855)]),sd(proxy[1901:1950,2])) # 0.217 0.217
names(test2)<-c("CRU","J98")
barplot(test2,col=col0,font=2,ylim=c(0,.4))

#COMPARE MBH99 VARIANCE TO CRU JJA
test3<-c(sd(CRU[(1902-1855):(1980-1855)]),sd(proxy[1902:1980,3]),sd(proxy[1948:1980,3]))
#test3<-array(test3,dim=c(2,2)) # 0.1860 0.1825 0.0958
names(test3)<-c("CRU","MBH 1902-80","MBH 1948-80")
barplot(test3[1:2],col=c("black","blue4","steelblue1"),font=2,ylim=c(0,.4))

#COMPARE ESPER VARIANCE
test<-annual[,"CRU.NH.AS"]
mean.obs<-mean(test[(1902-1850):(1980-1850)])
sd.obs<-sd(test[(1902-1850):(1980-1850)])
test4<-c(sd.obs,sd(proxy[1902:1980,"Esp02"]));test4 # 0.189 0.189
names(test4)<-c("CRU Land","Esp02")
barplot(test4,col=c("black","springgreen1"),font=2,ylim=c(0,.4))

#COMPARE MOBERG VARIANCE
test5<-apply(proxy[1902:1979,c("CRU","Mob05")],2,sd);test5 # 0.185 0.149
names(test5)<-c("CRU","Mob05")
barplot(test5,col=c("black","red1"),font=2,ylim=c(0,.4))

#      id      col0
# [1,] "CRU"   "black"
# [2,] "J98"   "blue4"
# [3,] "MBH99" "blue1"
# [4,] "MJ03"  "yellow"
# [5,] "CL00"  "dodgerblue2"
# [6,] "BJ00"  "orange"
# [7,] "BJ01"  "steelblue1"
# [8,] "Esp02" "springgreen1"
# [9,] "Mob05" "red1"

sd.proxy<-apply(proxy,2,sd,na.rm=T)
sd.short<-apply(proxy[1902:1980,],2,sd,na.rm=T)
test<-array(cbind(sd.short,sd.proxy),dim=c(9,2))
test<-t(test)
col00<-c(t(array(rep(col0,2),dim=c(9,2))))
barplot(test,col=col00,beside=TRUE,names.arg=id,font=2)
test6<-sd.proxy/sd.short
col0<-c("black","blue4","blue1","yellow","dodgerblue2","orange","steelblue1","springgreen1","red1")
barplot(sd.proxy/sd.short,col=col0,font=2)
f<-function(x) {f<-range(x,na.rm=TRUE);f<-f[2]-f[1];f}
test<-apply(fred,2,f); test
par(mar=c(4,4,2,2))
plot(test,test6,pch=19,cex=1.1,col=col0,ylim=c(0,2),xlim=c(0,1.2),
  xlab="Low-Frequency Range",font.axis=2,ylab="LRV/SRV - sd units",font=2)
mean.proxy<-apply(proxy[,2:9],1,mean,na.rm=T)
sd.across<-apply(proxy[,2:9],1,sd,na.rm=T) #year-by-year spread across the 8 reconstructions
ylim0<-range(c(mean.proxy+2*sd.across,mean.proxy-2*sd.across),na.rm=T)
plot(1:1990,mean.proxy,type="l",ylim=ylim0)
lines(1:1990,mean.proxy+2*sd.across,col="red1")
lines(1:1990,mean.proxy-2*sd.across,col="red1")
######
#ARIMA ANALYSIS
######
load("c:/climate/data/agu05/proxy.tab")
f<-function(x) {temp<-!is.na(x); x<-x[temp]; fm<-arima(x,order=c(1,0,0)); f<-coef(fm)[1];f}
test<-apply(proxy,2,f) #AR1 coefficient of each series
barplot(test,col=col0)
test
#  CRU   J98 MBH99  MJ03  CL00  BJ00  BJ01 Esp02 Mob05
#0.721 0.527 0.656 0.993 0.994 0.634 0.558 0.814 0.909
f<-function(x) {temp<-!is.na(x); x<-x[temp]; fm<-arima(x,order=c(1,0,1))
  f<-c(coef(fm)[1:2],sqrt(diag(fm$var.coef)[1:2]),sqrt(fm$sigma2))
  names(f)<-c("ar1","ma1","se.ar1","se.ma1","sigma");f}
proxy<-proxy0 #proxy0: earlier unsmoothed copy of the series, from the interactive session
arima.test<-apply(proxy,2,f)
test1<-arima.test[1:2,]
#       CRU    J98  MBH99  MJ03  CL00   BJ00   BJ01  Esp02 Mob05
#ar1  0.977  0.944  0.940 0.993 0.990  0.929  0.899  0.955 0.861
#ma1 -0.667 -0.694 -0.599 0.929 0.262 -0.569 -0.557 -0.480 0.945
barplot(test1,col=col00,beside=TRUE,ylim=c(-1,1),font=2);box()

################
##STATISTICS
################
load("c:/climate/data/agu05/proxy.tab")
range.proxy<-array(NA,dim=c(9,3))
index<-1000:1990
for (i in 1:9) {range.proxy[i,1:2]<-range(proxy[index,i],na.rm=T)}
range.proxy[,3]<-range.proxy[,2]-range.proxy[,1]
row.names(range.proxy)<-id
stat<-array(NA,dim=c(10,5))
#NB: the commented values below appear to come from a session variant whose rows
#start at year 1000 and which carries a CRU.smooth column
index<-949:981
stat[,5]<-apply(proxy[index,],2,sd,na.rm=TRUE);stat[,5]
#   CRU    J98  MBH99   CL00  Esp02  Mob05   MJ03    B00    B01 CRU.smooth
#0.1581 0.1796 0.0958 0.0670 0.0521 0.1193 0.0458 0.1210 0.1423     0.0546
index<-902:981
stat[,4]<-apply(proxy[index,],2,sd,na.rm=TRUE)
#   CRU    J98  MBH99   CL00  Esp02  Mob05   MJ03    B00    B01 CRU.smooth
#0.2046 0.2025 0.1822 0.0561 0.0536 0.1491 0.0956 0.1436 0.1595     0.1462
stat[,3]<-apply(proxy[852:901,],2,sd,na.rm=TRUE)
#   CRU    J98  MBH99   CL00  Esp02  Mob05   MJ03    B00    B01 CRU.smooth
#0.2120 0.1615 0.0837 0.0546 0.0720 0.1131 0.0410 0.1079 0.1672     0.0684
stat[,2]<-apply(proxy[401:851,],2,sd,na.rm=TRUE)
#   CRU    J98  MBH99   CL00  Esp02  Mob05   MJ03    B00    B01
#    NA 0.2023 0.1043 0.0696 0.1057 0.2056 0.0713 0.1339 0.2097
stat[,1]<-apply(proxy[1:400,],2,sd,na.rm=TRUE)
#   CRU    J98  MBH99   CL00  Esp02  Mob05   MJ03    B00    B01
#    NA 0.2087 0.1115 0.0541 0.1544 0.1922 0.0601 0.1263     NA
row.names(stat)<-names(proxy)
write.table(stat,file="c:/climate/data/agu05/stat.txt",sep="\t")
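#Rows of proxy are calendar years (1:1990), so subperiods can be written as year
#ranges directly. A named-window helper (hypothetical) for the sds above:
sd.windows<-function(x,windows) sapply(windows,function(w) sd(x[w],na.rm=TRUE))
sd.windows(proxy$MBH99,list(cal=1902:1980,ver=1856:1901))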
#check VZ [2004] fn19 comment that "the standard deviation of the MBH99
#reconstructed NH temperature in the period 1948-1980 is 0.9 K, compared to 1.4K
#derived from the NCEP reanalysis or from the Jones et al (1998) NH gridded
#instrumental data."
sd(MBH99[949:981]) # 0.09582343 ; 949:981 is 1948-1980 for a series starting in 1000
sd(CRU[(1948-1855):(1980-1855)]) # 0.1580797

###RANGE OF VARIABILITY
##RECENTER AND SMOOTH
#(multi: 5-column matrix of reconstructions loaded elsewhere in the session)
index<-903:981
m0<-apply(multi[index,],2,mean)
multi<-scale(multi,scale=FALSE,center=m0)
multi.smooth<-multi
for(i in 1:5) {multi.smooth[,i]<-filter.combine.pad(multi[,i],truncated.gauss.weights(13))[,2]}
dummy<-filter.combine.pad(ts(WA$recon,start=1400,end=1980),truncated.gauss.weights(13))[,2] #from Huybers
par(mar=c(4,4,2,2))
col9<-c("blue","green","red","purple","black")
plot(1000:1990,multi.smooth[1:991,1],col=col9[1],xlab="",ylab="",ylim=c(-.7,.5),axes=FALSE,type="l")
for(i in 2:5) {lines(1000:1990,multi.smooth[1:991,i],col=col9[i])}
lines(1400:1980,dummy,col="salmon")
axis(side=1);axis(side=2);box()

##MAKE DECADAL
f<-function(x) {
  N<-length(x)
  x<-array(x,dim=c(10,N/10)) #reshape so each column is one decade
  f<-apply(x,2,mean,na.rm=TRUE)
  f}
decadal.proxy<-apply(multi[1:(1990-830),],2,f)
dimnames(decadal.proxy)[[2]]<-c("Jones98","MBH99","Briffa00","Esper02")
start0<-82
f<-function(x) {N<-length(x)
  f<-c((1:N)[(x==min(x,na.rm=TRUE))&!is.na(x)],(1:N)[(x==max(x,na.rm=TRUE))&!is.na(x)])
  f<-10*(f+start0);f} #decades of minimum and maximum, as calendar years
apply(decadal.proxy,2,f)
#     Jones98 MBH99 Briffa00 Esper02
#[1,]    1690  1460     1810    1290
#[2,]    1930  1940     1950     980
start0<-99
apply(decadal.proxy[18:nrow(decadal.proxy),],2,f)
#     Jones98 MBH99 Briffa00 Esper02
#[1,]    1690  1460     1810    1290
#[2,]    1930  1940     1950    1950
f1<-function(x) {N<-length(x); f1<-max(x,na.rm=TRUE)-min(x,na.rm=TRUE);f1}
range.proxy.dec<-apply(decadal.proxy[18:nrow(decadal.proxy),],2,f1); round(range.proxy.dec,2)
#Jones98   MBH99 Briffa00 Esper02
#   0.73    0.51     0.47    0.52

##INSTRUMENTAL VERSIONS
rm(v)
load("c:/climate/data/jones/crutem2.tab")
dim(v) #1860 2592
monthly<-array(rep(NA,nrow(v)*18),dim=c(nrow(v),18))
for (i in 1:18) {monthly[,i]<-apply(v[,(1:72)+(i-1)*72],1,mean,na.rm=TRUE)} #5-degree latitude bands
mu1<-cos(seq(87.5,2.5,-5)*pi/180) #cos-latitude area weights at band midpoints
NH.CRU<-array(rep(NA,3*nrow(monthly)),dim=c(nrow(monthly),3))
NH.CRU<-ts(NH.CRU,start=c(1851,1),frequency=12)
dimnames(NH.CRU)[[2]]<-c("CRU.NH","CRU20.90","CRU30.70")
f2<-function (x) {f2<-weighted.mean(x,cos(seq(87.5,2.5,-5)*pi/180),na.rm=TRUE); f2}
NH.CRU[,"CRU.NH"]<-apply(monthly,1,f2)
f1<-function (x) {f1<-weighted.mean(x,mu1[1:14],na.rm=TRUE); f1}
NH.CRU[,"CRU20.90"]<-apply(monthly[,1:14],1,f1)
f3<-function (x) {f3<-weighted.mean(x,mu1[5:12],na.rm=TRUE); f3}
NH.CRU[,"CRU30.70"]<-apply(monthly[,5:12],1,f3)
rm(v)
load("c:/climate/data/jones/hadcrut2.tab")
dim(v) #1791 2592
monthly<-array(rep(NA,nrow(v)*18),dim=c(nrow(v),18))
for (i in 1:18) {monthly[,i]<-apply(v[,(1:72)+(i-1)*72],1,mean,na.rm=TRUE)}
NH.HAD<-array(rep(NA,2*nrow(monthly)),dim=c(nrow(monthly),2))
NH.HAD<-ts(NH.HAD,start=c(1856,1),frequency=12)
dimnames(NH.HAD)[[2]]<-c("HAD.NH","HAD20.90")
f2<-function (x) {f2<-weighted.mean(x,cos(seq(87.5,2.5,-5)*pi/180),na.rm=TRUE); f2}
NH.HAD[,"HAD.NH"]<-apply(monthly,1,f2)
f1<-function (x) {f1<-weighted.mean(x,mu1[1:14],na.rm=TRUE); f1}
NH.HAD[,"HAD20.90"]<-apply(monthly[,1:14],1,f1)
rm(v)
NH.monthly<-ts.union(NH.CRU,NH.HAD)
dimnames(NH.monthly)[[2]]<-c(dimnames(NH.CRU)[[2]],dimnames(NH.HAD)[[2]])
tsp(NH.monthly) #1851.000 2005.167 12.000
N<-floor(nrow(NH.monthly)/12)*12 #trim to whole years
NH.monthly<-NH.monthly[1:N,]
save(NH.monthly,file="c:/climate/data/esper05/NH.monthly.tab")
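#Worked check (illustrative, not in the original script): the zonal weights mu1
#are cos(latitude) at the 5-degree band midpoints, 87.5N down to 2.5N, so
#high-latitude bands contribute little area to the hemispheric mean.
lat.mid<-seq(87.5,2.5,-5) #band midpoints, degrees N
w<-cos(lat.mid*pi/180)
round(w/sum(w),3) #normalized contribution of each band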
#ANNUAL AND SEASONAL
annual<-array(rep(NA,10*N/12),dim=c(N/12,10))
dimnames(annual)[[2]]<-c("HAD.NH.ANN","HAD.NH.AS","HAD.20.ANN","HAD.20.AS","CRU.NH.ANN","CRU.NH.AS","CRU.20.ANN","CRU.20.AS",NA,NA)
annual[,c(1,3,5,7)]<-apply(NH.monthly[,c(1,2,4,5)],2,annavg) #annavg from the sourced scripts
f<-function (x) {f<-seasavg(x,TRUE,4:9); f} #April-September average
annual[,c(2,4,6,8)]<-apply(NH.monthly[,c(1,2,4,5)],2,f)
round(cor(annual[,1:8],use=use0),2) #use0 set in the sourced config scripts
#           HAD.NH.ANN HAD.NH.AS HAD.20.ANN HAD.20.AS CRU.NH.ANN CRU.NH.AS CRU.20.ANN CRU.20.AS
#HAD.NH.ANN       1.00      0.94       0.96      0.84       0.96      0.91       0.95      0.87
#HAD.NH.AS        0.94      1.00       0.92      0.94       0.93      0.95       0.90      0.93
#HAD.20.ANN       0.96      0.92       1.00      0.91       0.95      0.91       0.98      0.92
#HAD.20.AS        0.84      0.94       0.91      1.00       0.87      0.92       0.89      0.96
#CRU.NH.ANN       0.96      0.93       0.95      0.87       1.00      0.96       0.97      0.91
#CRU.NH.AS        0.91      0.95       0.91      0.92       0.96      1.00       0.93      0.96
#CRU.20.ANN       0.95      0.90       0.98      0.89       0.97      0.93       1.00      0.94
#CRU.20.AS        0.87      0.93       0.92      0.96       0.91      0.96       0.94      1.00

##MAKE DECADAL
f<-function(x) {
  N<-length(x)
  x<-array(x,dim=c(10,N/10))
  f<-apply(x,2,mean,na.rm=TRUE)
  f}
decadal<-apply(annual[1:130,],2,f)
round(cor(decadal),2)
#(first eight rows/columns labelled; the last two are the unnamed columns of annual)
#           HAD.NH.ANN HAD.NH.AS HAD.20.ANN HAD.20.AS CRU.NH.ANN CRU.NH.AS CRU.20.ANN CRU.20.AS
#HAD.NH.ANN       1.00      0.95       0.93      0.71       0.94      0.88       0.95      0.85  0.98 0.75
#HAD.NH.AS        0.95      1.00       0.98      0.89       0.93      0.94       0.96      0.95  0.97 0.91
#HAD.20.ANN       0.93      0.98       1.00      0.91       0.92      0.94       0.96      0.96  0.96 0.92
#HAD.20.AS        0.71      0.89       0.91      1.00       0.76      0.88       0.83      0.95  0.79 0.99
#CRU.NH.ANN       0.94      0.93       0.92      0.76       1.00      0.97       0.99      0.91  0.95 0.80
#CRU.NH.AS        0.88      0.94       0.94      0.88       0.97      1.00       0.97      0.98  0.92 0.90
#CRU.20.ANN       0.95      0.96       0.96      0.83       0.99      0.97       1.00      0.95  0.97 0.85
#CRU.20.AS        0.85      0.95       0.96      0.95       0.91      0.98       0.95      1.00  0.90 0.95
#                 0.98      0.97       0.96      0.79       0.95      0.92       0.97      0.90  1.00 0.82
#                 0.75      0.91       0.92      0.99       0.80      0.90       0.85      0.95  0.82 1.00

##DECADAL RANGE
f<-function(x) {f<-range(x,na.rm=TRUE); f<-f[2]-f[1];f}
round(apply(decadal,2,f),2)
#HAD.NH.ANN  HAD.NH.AS HAD.20.ANN  HAD.20.AS CRU.NH.ANN  CRU.NH.AS CRU.20.ANN  CRU.20.AS
#      0.54       0.41       0.62       0.50       0.50       0.42       0.62       0.50  0.00 0.00
f<-function(x) {f<-c(x[13]-x[2],x[6]-x[1]); f} #decade differences
round(apply(decadal,2,f),2)
#     HAD.NH.ANN HAD.NH.AS HAD.20.ANN HAD.20.AS CRU.NH.ANN CRU.NH.AS CRU.20.ANN CRU.20.AS
#[1,]       0.28      0.09       0.14     -0.03       0.21      0.09       0.22      0.07  0 0
#[2,]      -0.03     -0.13      -0.25     -0.41      -0.05     -0.17      -0.14     -0.31  0 0

##FIGURE S2
nf<-layout(matrix(1:3,ncol=1),heights=c(1.2,1,1.4))
nf
#PANEL 1: HAD NH vs CRU NH - ANNUAL
par(mar=c(0,4,2,2))
plot(1851:1980,annual[1:130,5],type="l",xlab="",ylab="",axes=FALSE,ylim=c(-.9,.4))
axis(side=2)
axis(side=1,labels=FALSE)
abline(h=0)
box()
lines(1851:1980,annual[1:130,1],col="red")
v<-lowess(annual[6:130,5],f=20/130)
lines(1856:1980,v$y)
v<-lowess(annual[1:130,1],f=20/130)
lines(1851:1980,v$y,col="red")
#PANEL 2: CRU ANN NH vs CRU ANN 20
par(mar=c(0,4,0,2))
plot(1851:1980,annual[1:130,5],type="l",xlab="",ylab="",axes=FALSE,ylim=c(-.9,.4))
axis(side=2)
axis(side=1,labels=FALSE)
abline(h=0)
box()
lines(1851:1980,annual[1:130,7],col="red")
v<-lowess(annual[6:130,5],f=20/130)
lines(1856:1980,v$y)
v<-lowess(annual[6:130,7],f=20/130)
lines(1856:1980,v$y,col="red")
#PANEL 3: CRU ANN NH vs CRU AS NH
par(mar=c(4,4,0,2))
plot(1851:1980,annual[1:130,5],type="l",xlab="",ylab="",axes=FALSE,ylim=c(-.9,.4))
axis(side=2)
axis(side=1)
abline(h=0)
box()
lines(1851:1980,annual[1:130,6],col="red")
v<-lowess(annual[6:130,5],f=20/130)
lines(1856:1980,v$y)
v<-lowess(annual[6:130,6],f=20/130)
lines(1856:1980,v$y,col="red")
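#Quick illustration (toy data) of the decadal-averaging trick used in the
#MAKE DECADAL blocks above: reshape a length-10k vector into a 10 x k matrix
#so each column is one decade, then take column means.
x<-1:30
apply(array(x,dim=c(10,3)),2,mean) # 5.5 15.5 25.5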
#(fragment: parse a fixed-width file, keeping every other line; url from an earlier step)
h<-readLines(url)
temp<-seq(1,length(h)-1,2)
h<-h[temp]
h<-as.numeric(substr(h,92,98))
source("c:/climate/scripts/config.CRU.txt")

#NH land and sea CRU 1902-1980; 1961-1990
#30-70 land; from Cook et al 2004 (Table 1); 1856-1992:
#"The temperature data are 'land-only' in the same 30-70°N latitude band as the
#tree-ring sites. This differs from the full NH annual average temperature series
#used by Esper et al. (2002), but that result would be very similar."
#20-90 land; Briffa et al 2004; 1881-1960:
#"Fig. 8. Average temperature over land areas north of 20°N, as observed (black)
#and reconstructed by a simple linear regression recalibration of published series
#by Jones et al. (1998) in red; Mann et al. (1999) in purple; Briffa and Osborn
#(1999) in green; Briffa et al. (2001) in blue; and Esper et al. (2002) in pink.
#The series used from Mann et al. (1999) was an average of land grid boxes north
#of 20°N from their spatially resolved reconstructions."

##CORRELATIONS
#ftp://ftp.ngdc.noaa.gov/paleo/treering/reconstructions/n_hem_temp/briffa2001jgr3.txt
#"The following reconstructions have been taken from the source references listed
#below, and then RECALIBRATED to obtain estimates of April-September mean
#temperatures from all land regions north of 20°N. 1871-1997 (used 1881-1960):
#Observed temperatures from Jones et al. (1999) Rev Geophys"

##SCALING
#NB: in this section proxy is the 4-column ts (Jones98, MBH99, Briffa00, Esper02),
#not the 9-series data frame collated above; sd.obs is the vector of target sds
#computed as in the FIGURE 2 section below
#calculate ranges for the full index period
index2<-(1856-tsp(proxy)[1]+1):(1980-tsp(proxy)[1]+1)
sd.proxy<-apply(proxy[index2,],2,sd)
stat<-array(rep(NA,8*4),dim=c(4,8))
for (i in 1:4) {stat[i,]<-sd.obs[1:8]*(range.proxy.dec[i]/sd.proxy[i])}
stat<-data.frame(stat)
dimnames(stat)[[1]]<-dimnames(proxy)[[2]]
dimnames(stat)[[2]]<-dimnames(annual)[[2]][1:8]
stat.instr<-stat
sd.proxy
#Jones98   MBH99 Briffa00 Esper02
#   0.26    0.18     0.14    0.10
#         HAD.NH.ANN HAD.NH.AS HAD.20.ANN HAD.20.AS CRU.NH.ANN CRU.NH.AS CRU.20.ANN CRU.20.AS
#Jones98        0.70      0.57       0.79      0.69       0.62      0.54       0.74      0.62
#MBH99          0.69      0.56       0.78      0.68       0.61      0.53       0.73      0.61
#Briffa00       0.81      0.66       0.92      0.80       0.72      0.62       0.86      0.72
#Esper02        1.25      1.02       1.42      1.22       1.10      0.96       1.31      1.10
#calculate ranges for the calibration period
index<-(1900-tsp(proxy)[1]+1):(1977-tsp(proxy)[1]+1)
sd.proxy<-apply(proxy[index,],2,sd)
stat<-array(rep(NA,8*4),dim=c(4,8))
for (i in 1:4) {stat[i,]<-sd.obs[1:8]*(range.proxy.dec[i]/sd.proxy[i])}
stat<-data.frame(stat)
dimnames(stat)[[1]]<-dimnames(proxy)[[2]]
dimnames(stat)[[2]]<-dimnames(annual)[[2]][1:8]
stat.cal<-stat;stat.cal
sd.proxy
#Jones98   MBH99 Briffa00 Esper02
#  0.201   0.182    0.145   0.088
stat.cal
#         HAD.NH.ANN HAD.NH.AS HAD.20.ANN HAD.20.AS CRU.NH.ANN CRU.NH.AS CRU.20.ANN CRU.20.AS
#Jones98        0.77      0.68       1.00       0.9       0.80      0.74       0.98      0.86
#MBH99          0.60      0.53       0.78       0.7       0.63      0.57       0.76      0.67
#Briffa00       0.69      0.61       0.90       0.8       0.72      0.66       0.88      0.78
#Esper02        1.25      1.11       1.63       1.5       1.31      1.20       1.59      1.41
#calculate ranges for the verification period
index<-(1856-tsp(proxy)[1]+1):(1901-tsp(proxy)[1]+1)
sd.proxy<-apply(proxy[index,],2,sd)
stat<-array(rep(NA,8*4),dim=c(4,8))
for (i in 1:4) {stat[i,]<-sd.obs[1:8]*(range.proxy.dec[i]/sd.proxy[i])}
stat<-data.frame(stat)
dimnames(stat)[[1]]<-dimnames(proxy)[[2]]
dimnames(stat)[[2]]<-dimnames(annual)[[2]][1:8]
sd.proxy
#Jones98   MBH99 Briffa00 Esper02
#  0.172   0.086    0.110   0.061
stat.ver<-stat;stat.ver
#         HAD.NH.ANN HAD.NH.AS HAD.20.ANN HAD.20.AS CRU.NH.ANN CRU.NH.AS CRU.20.ANN CRU.20.AS
#Jones98        0.90      0.80        1.2       1.0       0.94      0.86        1.1       1.0
#MBH99          1.26      1.13        1.6       1.5       1.33      1.22        1.6       1.4
#Briffa00       0.91      0.81        1.2       1.1       0.96      0.88        1.2       1.0
#Esper02        1.80      1.61        2.4       2.1       1.89      1.73        2.3       2.0

##FIGURE 1 PANEL 2
stat.instr[,7]/stat.instr[,5] #latitude
# 1.2 1.2 1.2 1.2 #consistent more or less
stat.instr[,1]/stat.instr[,5] #sea
# 0.95 0.95 0.95 0.95
stat.instr[,6]/stat.instr[,5] #season
# 0.92 0.92 0.92 0.92
stat.cal[,5]/stat.instr[,5] #period
# 1.3 1.0 1.0 1.2
stat.ver[,5]/stat.instr[,5] #period
# 1.5 2.1 1.3 1.7
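#FIGURE 2 below repeats the same variance-matching step panel by panel: rescale
#each reconstruction onto an instrumental target so that its calibration-period
#mean and sd match the target's. As a standalone sketch (hypothetical helper;
#the figure code inlines the identical arithmetic):
match.target<-function(x,mean.est,sd.est,mean.obs,sd.obs) {
  mean.obs+(x-mean.est)*sd.obs/sd.est }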
###FIGURE 2
index<-(1856:1980)-1850
mean.obs<-apply(annual[index,],2,mean)
sd.obs<-apply(annual[index,],2,sd)
col6<-c("black","red","blue","purple")
#PANELS 1-2: scaled on 1856-1980
index2<-(1856-tsp(proxy)[1]+1):(1980-tsp(proxy)[1]+1)
mean.est<-apply(proxy[index2,],2,mean)
sd.est<-apply(proxy[index2,],2,sd)
nf<-layout(array(1:4,dim=c(1,4)))
layout.show(nf)
par(mar=c(3,4,2,0))
j<-1
i<-1
recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
v<-lowess(recon,f=20/130)
plot(1856:1980,v$y,type="l",ylim=c(-.8,.2),xlab="",ylab="")
abline(h=0)
for (i in 2:4){
  recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
  v<-lowess(recon,f=20/130)
  lines(1856:1980,v$y,col=col6[i])}
par(mar=c(3,0,2,0))
j<-2
i<-1
recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
v<-lowess(recon,f=20/130)
plot(1856:1980,v$y,type="l",ylim=c(-.8,.2),xlab="",ylab="")
abline(h=0)
for (i in 2:4){
  recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
  v<-lowess(recon,f=20/130)
  lines(1856:1980,v$y,col=col6[i])}
#PANELS 3-4: scaled on 1900-1977
index<-(1900:1977)-1850
mean.obs<-apply(annual[index,],2,mean)
sd.obs<-apply(annual[index,],2,sd)
col6<-c("black","red","blue","purple")
index2<-(1900-tsp(proxy)[1]+1):(1977-tsp(proxy)[1]+1)
mean.est<-apply(proxy[index2,],2,mean)
sd.est<-apply(proxy[index2,],2,sd)
index2<-(1856:1980)-tsp(proxy)[1]+1
par(mar=c(3,0,2,0))
j<-1
i<-1
recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
v<-lowess(recon,f=20/130)
plot(1856:1980,v$y,type="l",ylim=c(-.8,.2),xlab="",ylab="")
abline(h=0)
for (i in 2:4){
  recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
  v<-lowess(recon,f=20/130)
  lines(1856:1980,v$y,col=col6[i])}
par(mar=c(3,0,2,2))
j<-2
i<-1
recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
v<-lowess(recon,f=20/130)
plot(1856:1980,v$y,type="l",ylim=c(-.8,.2),xlab="",ylab="")
abline(h=0)
for (i in 2:4){
  recon<-mean.obs[j]+(proxy[index2,i]-mean.est[i])*sd.obs[j]/sd.est[i]
  v<-lowess(recon,f=20/130)
  lines(1856:1980,v$y,col=col6[i])}

##ARIMA VARIANCES
#sd over two random short windows vs the full series, for persistent processes
K<-78
stat1<-array(NA,dim=c(1000,3))
for (i in 1:1000) {
  x<-arima.sim(n=5000,model=list(ar=0.99,ma=-0.6))
  n<-sample((5000-K),2)
  stat1[i,1]<-sd(x[n[1]:(n[1]+K)]) #79-year window
  stat1[i,2]<-sd(x[n[2]:(n[2]+49)]) #50-year window
  stat1[i,3]<-sd(x) }
# > apply(stat,2,mean)
# [1] 1.28 1.19 1.50
# > apply(stat1,2,mean)
# [1] 1.47 1.32 2.85
# > 1/(1-.95^2)
# [1] 10.3
# > sqrt(1/(1-.95^2))
# [1] 3.2
# > sqrt(1/(1-.99^2))
# [1] 7.09
stat2<-array(NA,dim=c(1000,3))
K<-78
for (i in 1:1000) {
  x<-arima.sim(n=5000,model=list(ar=0.95))
  n<-sample((5000-K),2)
  stat2[i,1]<-sd(x[n[1]:(n[1]+K)])
  stat2[i,2]<-sd(x[n[2]:(n[2]+49)])
  stat2[i,3]<-sd(x) }
#2.45 2.18 3.18
stat3<-array(NA,dim=c(1000,3))
for (i in 1:1000) {
  x<-arima.sim(n=5000,model=list(ar=0.99))
  n<-sample((5000-K),2)
  stat3[i,1]<-sd(x[n[1]:(n[1]+K)])
  stat3[i,2]<-sd(x[n[2]:(n[2]+49)])
  stat3[i,3]<-sd(x) }
# 3.11 2.58 6.91
sqrt(1/(1-.99^2)) #[1] 7.09
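#Why the full-series sd exceeds the windowed sds above: for an AR(1) process
#with unit innovation variance the process sd is sqrt(1/(1-rho^2)) -- 3.2 for
#rho=0.95 -- but sd over a short window is biased low because the window mean
#absorbs the slow excursions. A direct check (illustrative, not in the original):
set.seed(2)
x<-arima.sim(n=50000,model=list(ar=0.95))
c(theory=sqrt(1/(1-0.95^2)), full=sd(x),
  short=mean(sapply(seq(1,49000,by=1000),function(i) sd(x[i:(i+78)]))))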
#sd factor for recorded series relative to innovation sd:
# sigma^2 * (1 + theta/rho + (rho+theta)^2/(1-rho^2)), implemented as func() below
rho<-0.95
theta<- -0.6
func<-function(rho,theta){func<-(1+theta/rho+(rho+theta)^2/(1-rho^2));func} # 1.62
1/(1-rho^2) # 10.3
func(0.99,-0.6) #[1] 8.04
#ARMA(1,1) fits to the nine series (arima.test, from the ARIMA ANALYSIS section):
#           CRU     J98   MBH99    MJ03    CL00    BJ00    BJ01   Esp02  Mob05
#ar1     0.9775  0.9440  0.9399 0.99260 0.99029  0.9286  0.8987  0.9546 0.8609
#ma1    -0.6671 -0.6937 -0.5989 0.92873 0.26173 -0.5689 -0.5569 -0.4797 0.9446
#se.ar1  0.0241  0.0176  0.0153 0.00368 0.00451  0.0162  0.0287  0.0102 0.0114
#se.ma1  0.0963  0.0438  0.0388 0.00603 0.02632  0.0368  0.0574  0.0295 0.0051
#sigma   0.1310  0.1835  0.0931 0.00652 0.01203  0.1014  0.1693  0.1632 0.0596
a1<-seq(.9,.99,.005)
a2<-seq(-.7,-.4,.05)
length(a1);length(a2) # 19 7
test<-array(NA,dim=c(19,7))
for (i in 1:19) {for (j in 1:7) {test[i,j]<-func(a1[i],a2[j])}}
#       [,1]  [,2]  [,3]  [,4]  [,5]  [,6]  [,7]
# [1,] 0.433 0.607 0.807  1.03  1.29  1.57  1.87
# [2,] 0.459 0.641 0.851  1.09  1.35  1.65  1.97
# [3,] 0.487 0.679 0.900  1.15  1.43  1.74  2.07
# [4,] 0.519 0.721 0.954  1.22  1.51  1.84  2.19
# [5,] 0.554 0.768 1.014  1.29  1.60  1.95  2.33
# [6,] 0.594 0.821 1.083  1.38  1.71  2.08  2.48
# [7,] 0.639 0.881 1.161  1.48  1.83  2.22  2.65
# [8,] 0.690 0.951 1.251  1.59  1.97  2.39  2.85
# [9,] 0.750 1.031 1.355  1.72  2.13  2.58  3.08
#[10,] 0.820 1.126 1.478  1.88  2.32  2.81  3.35
#[11,] 0.904 1.239 1.625  2.06  2.55  3.09  3.68
#[12,] 1.006 1.377 1.804  2.29  2.83  3.43  4.08
#[13,] 1.133 1.549 2.028  2.57  3.18  3.85  4.58
#[14,] 1.296 1.769 2.315  2.93  3.63  4.39  5.23
#[15,] 1.512 2.063 2.698  3.42  4.22  5.11  6.09
#[16,] 1.814 2.473 3.233  4.09  5.06  6.12  7.29
#[17,] 2.266 3.087 4.034  5.11  6.31  7.63  9.09
#[18,] 3.017 4.109 5.369  6.80  8.39 10.16 12.09
#[19,] 4.519 6.152 8.037 10.17 12.56 15.20 18.09
func(.977,-.667) # 2.43
func(.944,-.694) # 0.839
func(0.940,-.599) # 1.36
func(0.929,-.569) # 1.33
func(0.899,-.557) # 0.99
func(0.955,-0.48) # 3.06
contour(x=a1,y=a2,z=test)
filled.contour(x=a1,y=a2,z=test,levels=c(seq(0.5,1,.25),seq(2,14,2)),
  plot.axes={axis(1);axis(2)
    #contour(x=a1,y=a2,z=test,levels=c(seq(0.5,1,.25),seq(2,14,2)),add=TRUE)
    points(.967,-.6,pch=19)
    points(t(arima.test[1:2,c(1:3,8)]),pch=19)})

##VARIANCES WITH ARMA(1,1)
#      CRU    J98  MBH99  MJ03  CL00   BJ00   BJ01  Esp02 Mob05
#ar1  0.977  0.944  0.940 0.993 0.990  0.929  0.899  0.955 0.861
#ma1 -0.667 -0.694 -0.599 0.929 0.262 -0.569 -0.557 -0.480 0.945
stat<-array(NA,dim=c(1000,3,6))
K<-78
index<-c(1,2,3,8) #CRU, J98, MBH99, Esp02
for (j in 1:4) {
  for (i in 1:1000) {
    x<-arima.sim(n=5000,model=list(ar=arima.test[1,index[j]],ma=arima.test[2,index[j]]))
    n<-sample((5000-K),2)
    stat[i,1,j]<-sd(x[n[1]:(n[1]+K)])
    stat[i,2,j]<-sd(x[n[2]:(n[2]+49)])
    stat[i,3,j]<-sd(x) } }
#step through j:
j<-1
test<-stat[,,j]
apply(test,2,mean)
# [1] 1.27 1.17 1.76   #2.43*.13 # 0.316
j<-j+1
test<-stat[,,j]
apply(test,2,mean)
# [1] 1.13 1.09 1.25   #.839*.1835 # 0.154
j<-j+1
test<-stat[,,j]
apply(test,2,mean)
# [1] 1.23 1.17 1.41   #1.36*.0931 # 0.127
j<-j+1
test<-stat[,,j]
apply(test,2,mean)
# [1] 1.50 1.36 1.87   #3.06*.1632 # 0.499
arima(crowley.versions[,3],order=c(1,0,1))
#Call: arima(x = crowley.versions[, 3], order = c(1, 0, 1))
#Coefficients:
#        ar1    ma1  intercept
#      0.989 -0.692      0.483
#s.e.  0.005  0.031      0.038
#sigma^2 estimated as 0.00235: log likelihood = 1579, aic = -3149
arima(crowley.versions[,4],order=c(1,0,1))
#Coefficients:
#        ar1    ma1  intercept
#      0.992 -0.761      0.485
#s.e.  0.004  0.029      0.050
#sigma^2 estimated as 0.00330: log likelihood = 1413, aic = -2818
func(.989,-.692) # 4.33
func(.992,-.761) # 3.58
#ratio of short-window sds: CRU-type vs MBH99-type noise
test1<-stat[,,1][,2]
test2<-stat[,,3][,2]
n<-sample(1:1000,500)
m<-sample(1:1000,500)
fred<-test1[n]/test2[m]
hist(fred)
median(fred) # 1.01
2.43/1.06 # 2.29
quantile(fred,c(.025,.975)) ## 0.612 1.815
test2<-stat[,,4][,2] #Esp02-type noise
fred<-test1[n]/test2[m]
hist(fred)
median(fred) # 0.88
2.43/3.06 # 0.794
quantile(fred,c(.025,.975)) ## 0.496 1.611

##SPURIOUS REGRESSION
library(lmtest)
load("c:/climate/data/agu05/proxy.tab") #1990 9
dim(proxy)
fred<-proxy[800:1990,]
fred<-ts(fred,start=800)
f<-function(x) {f<-filter.combine.pad(x,truncated.gauss.weights(21))[,2];f}
for (i in 1:9) {fred[,i]<-f(fred[,i])}
#REGRESSION 1 - Jones98
stat.regress<-array(NA,dim=c(8,13))
stat.regress<-data.frame(stat.regress)
names(stat.regress)<-c("RE.cal","RE.ver","R2.cal","R2.ver","dw","se.cal","se.ver","ar1","ma1","int","se.ar1","se.ma1","se.int")
cal<-1902:1980;ver<-1856:1901
for (i in 1:8) {
  z<-data.frame(proxy[,c(1,i+1)]) #CRU plus one reconstruction (CRU assumed column 1)
  names(z)<-c("CRU","proxy")
  fm<-lm(CRU~proxy,data=z[cal,])
  predict0<-ts(predict(fm,newdata=z),start=1)
  se.ver<-sqrt(sum((predict0[1856:1901]-CRU[1:(1901-1855)])^2)/(1901-1856))
  se.cal<-sqrt(sum((predict0[1902:1980]-CRU[(1902-1855):(1980-1855)])^2)/(1980-1902))
  test<-unlist(verification.stats(predict0,CRU,c(1902,1980),c(1856,1901)))[1:4]
  stat.regress[i,1:4]<-test
  stat.regress[i,5]<-dwtest(fm)$statistic
  stat.regress[i,6:7]<-c(se.cal,se.ver)
  test<-arima(fm$residuals,order=c(1,0,1))
  fred<-c(test$coef,sqrt(diag(test$var.coef)))
  stat.regress[i,8:13]<-fred }
id<-dimnames(proxy)[[2]]
row.names(stat.regress)<-id[2:9]
#J98 calibration regression:
#            Estimate Std. Error  t value Pr(>|t|)
#(Intercept) -7.98e-18   1.77e-02 -4.5e-16        1
#J98          4.93e-01   8.75e-02     5.63  2.8e-07 ***
#Residual standard error: 0.157 on 77 degrees of freedom
#Multiple R-Squared: 0.292, Adjusted R-squared: 0.283
#F-statistic: 31.7 on 1 and 77 DF, p-value: 2.76e-07
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver R2.cal R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1  0.292  0.436  0.292 0.0301 -0.269       0.5     0.703  -0.647   0.856    0.101
dwtest(fm) ##DW = 0.994, p-value = 5.066e-07
arima(fm$residuals,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.928 -0.687      0.009
#s.e.  0.087  0.199      0.058
#sigma^2 estimated as 0.0177: log likelihood = 46.9, aic = -85.9
resid.fm<-predict0[1856:1980]-proxy$CRU[1856:1980]
arima(resid.fm,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.677 -0.303     -0.016
#s.e.  0.188  0.254      0.026
#sigma^2 estimated as 0.0182: log likelihood = 72.7, aic = -137
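#Illustration (not part of the original analysis): regressing two independent
#AR(1) series often produces a "significant" slope together with a very low
#Durbin-Watson statistic -- the spurious-regression signature screened for above.
set.seed(1)
x<-arima.sim(n=100,model=list(ar=0.9))
y<-arima.sim(n=100,model=list(ar=0.9))
fm0<-lm(y~x)
summary(fm0)$coefficients #slope can look significant despite no relationship
dwtest(fm0)               #DW typically well below 2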
#REGRESSION 2 - MBH99
cal<-1902:1980;ver<-1856:1901
fm<-lm(CRU~MBH99,data=proxy[cal,])
summary(fm); predict0<-ts(predict(fm,newdata=proxy),start=1)
#Coefficients:
#            Estimate Std. Error  t value Pr(>|t|)
#(Intercept) -8.20e-18   9.81e-03 -8.4e-16        1
#MBH99        9.01e-01   5.41e-02     16.7   <2e-16 ***
#Residual standard error: 0.0872 on 77 degrees of freedom
#Multiple R-Squared: 0.783, Adjusted R-squared: 0.78
#F-statistic: 278 on 1 and 77 DF, p-value: <2e-16
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver R2.cal R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1  0.783   0.56  0.783  0.165 0.0117     0.652      2.10  -0.692   0.909    0.215
dwtest(fm) #DW = 1.79, p-value = 0.1560
#alternative hypothesis: true autocorrelation is greater than 0
arima(fm$residuals,order=c(1,0,1)) #fine
dwtest(fm,data=proxy[1856:1901,])
resid.fm<-predict0[1856:1980]-proxy$CRU[1856:1980]
durbin.watson(resid.fm[1:55]) # 1.29

##ESPER
#REGRESSION 3 - Esp02
fm<-lm(CRU~Esp02,data=proxy[cal,])
summary(fm); predict0<-ts(predict(fm,newdata=proxy),start=1)
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
dwtest(fm)
arima(fm$residuals,order=c(1,0,1)) #fine
#lm(formula = CRU ~ Esp02, data = proxy[cal, ])
#Residuals:
#    Min      1Q  Median      3Q     Max
#-0.2969 -0.1053  0.0215  0.0920  0.3150
#Coefficients:
#            Estimate Std. Error t value Pr(>|t|)
#(Intercept)   0.0554     0.0177    3.13   0.0025 **
#Esp02         0.5928     0.0796    7.45  1.2e-10 ***
#Residual standard error: 0.143 on 77 degrees of freedom
#Multiple R-Squared: 0.419, Adjusted R-squared: 0.411
#F-statistic: 55.5 on 1 and 77 DF, p-value: 1.17e-10
#> verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver R2.cal R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1  0.419  0.356  0.419 0.0104 -0.448     0.609     0.897  -0.756   0.942   0.0593
#> dwtest(fm)
#DW = 1.24, p-value = 0.0001491
#alternative hypothesis: true autocorrelation is greater than 0
#> arima(fm$residuals,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.687 -0.369      0.000
#s.e.  0.258  0.342      0.029
#sigma^2 estimated as 0.0167: log likelihood = 49.4, aic = -90.8
arima(fm$residuals,order=c(1,0,0)) #fine

###MOBERG
fm<-lm(CRU~Mob05,data=proxy[cal,])
summary(fm); predict0<-ts(predict(fm,newdata=proxy),start=1)
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
dwtest(fm)
arima(fm$residuals,order=c(1,0,1)) #fine
#Coefficients:
#            Estimate Std. Error t value Pr(>|t|)
#(Intercept) -0.00281    0.02107   -0.13      0.9
#Mob05        0.09588    0.14215    0.67      0.5
#Residual standard error: 0.186 on 76 degrees of freedom
#Multiple R-Squared: 0.00595, Adjusted R-squared: -0.00713
#F-statistic: 0.455 on 1 and 76 DF, p-value: 0.502
#> verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver  R2.cal R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1 0.0239  0.170 0.00592 0.0356 -0.868     0.543      2.61 -0.0103   0.108   0.0143
#> dwtest(fm)
#DW = 0.532, p-value = 1.886e-15
#alternative hypothesis: true autocorrelation is greater than 0
#> arima(fm$residuals,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.977 -0.702     -0.040
#s.e.  0.025  0.088      0.122
#sigma^2 estimated as 0.0137: log likelihood = 55.9, aic = -104
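#verification.stats() is sourced from the treeconfig scripts. A minimal sketch
#of the RE and CE statistics it reports, assuming the usual definitions
#(skill relative to the calibration mean and the verification mean respectively):
re.ce<-function(obs,est,cal.mean) {
  sse<-sum((obs-est)^2)
  c(RE=1-sse/sum((obs-cal.mean)^2),
    CE=1-sse/sum((obs-mean(obs))^2)) }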
###CROWLEY
fm<-lm(CRU~CL00,data=proxy[cal,])
summary(fm); predict0<-ts(predict(fm,newdata=proxy),start=1)
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
dwtest(fm)
arima(fm$residuals,order=c(1,0,1)) #fine
#Coefficients:
#            Estimate Std. Error  t value Pr(>|t|)
#(Intercept) -8.09e-18   1.97e-02 -4.1e-16   1.0000
#CL00         1.16e+00   3.53e-01     3.29   0.0015 **
#Residual standard error: 0.175 on 77 degrees of freedom
#Multiple R-Squared: 0.124, Adjusted R-squared: 0.112
#F-statistic: 10.9 on 1 and 77 DF, p-value: 0.00149
#> verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver R2.cal R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1  0.124  0.372  0.124 0.0145 -0.412       0.5      1.46  -0.817    1.02   0.0564
#> dwtest(fm)
#DW = 0.614, p-value = 1.068e-13
#alternative hypothesis: true autocorrelation is greater than 0
#> arima(fm$residuals,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.974 -0.677     -0.004
#s.e.  0.038  0.140      0.124
#sigma^2 estimated as 0.0150: log likelihood = 53.1, aic = -98.3

###B01
fm<-lm(CRU~BJ01,data=proxy[cal,])
summary(fm); predict0<-ts(predict(fm,newdata=proxy),start=1)
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
dwtest(fm)
arima(fm$residuals,order=c(1,0,1)) #fine
#Coefficients:
#            Estimate Std. Error t value Pr(>|t|)
#(Intercept)   -0.026      0.021   -1.24     0.22
#BJ01           0.722      0.132    5.46  1.1e-06 ***
#Residual standard error: 0.162 on 57 degrees of freedom
#Multiple R-Squared: 0.344, Adjusted R-squared: 0.332
#F-statistic: 29.9 on 1 and 57 DF, p-value: 1.07e-06
#> verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver R2.cal R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1  0.448  0.489  0.389  0.170 -0.148     0.587      1.90  -0.528   0.614    0.353
#> dwtest(fm)
#DW = 0.893, p-value = 1.116e-06
#alternative hypothesis: true autocorrelation is greater than 0
#> arima(fm$residuals,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.983 -0.773      0.007
#s.e.  0.024  0.080      0.121
#sigma^2 estimated as 0.0137: log likelihood = 42.1, aic = -76.3

##B00
fm<-lm(CRU~BJ00,data=proxy[cal,])
summary(fm); predict0<-ts(predict(fm,newdata=proxy),start=1)
verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
dwtest(fm)
arima(fm$residuals,order=c(1,0,1)) #fine
#            Estimate Std. Error  t value Pr(>|t|)
#(Intercept) -5.34e-18   1.88e-02 -2.8e-16        1
#BJ00         5.77e-01   1.31e-01      4.4  3.5e-05 ***
#Residual standard error: 0.167 on 77 degrees of freedom
#Multiple R-Squared: 0.201, Adjusted R-squared: 0.19
#F-statistic: 19.3 on 1 and 77 DF, p-value: 3.45e-05
#> verification.stats(predict0,CRU,c(1902,1980),c(1856,1901))
#  RE.cal RE.ver R2.cal  R2.ver     CE sign.test prod.test RE.risk RE.bias RE.covar
#1  0.201  0.333  0.201 0.00525 -0.501       0.5      1.08  -0.149   0.337   0.0316
#> dwtest(fm)
#DW = 0.863, p-value = 7.649e-09
#alternative hypothesis: true autocorrelation is greater than 0
#> arima(fm$residuals,order=c(1,0,1))
#        ar1    ma1  intercept
#      0.969 -0.766     -0.003
#s.e.  0.035  0.093      0.085
#sigma^2 estimated as 0.0173: log likelihood = 47.7, aic = -87.3

col0<-c("black","blue4","blue1","yellow","dodgerblue2","orange","steelblue1","springgreen1","red1")
col00<-c(t(array(rep(col0[2:9],2),dim=c(8,2))))
stat.regress[2,4]<-0.02 ##apply MBH98 information (overwrite R2.ver for MBH99)
par(mar=c(3,3,1,1))
barplot(stat.regress[,5],col=col0[2:9],font=2,names.arg=id[2:9],ylim=c(0,2));box() #Durbin-Watson statistics
abline(h=1.5,lwd=2,col="red")
par(bg="grey90")
par(mar=c(3,3,2,1))
barplot(t(stat.regress[,3:4]),beside=TRUE,ylim=c(0,1),col=col00,font=2)
title(main="Cross-Validation R2")
load("c:/climate/data/crowley/crowley.versions.tab")
dim(crowley.versions)
X<-crowley.versions
X<-ts(X,start=1000)
X[,4]<-filter.combine.pad(X[,4],truncated.gauss.weights(21))[,2]
par(bg="steelblue2")
par(mar=c(3,3,1,1))
plot(1000:1982,X[,2],type="l",col="black",font=2,las=1,ylab="",lwd=1)
lines(1000:1982,X[,4],col="yellow",lwd=2)