From 453618896ca618e0c1bc7f06f471bf082168fc4c Mon Sep 17 00:00:00 2001 From: lorenz-c Date: Tue, 25 Jun 2013 14:24:36 +0200 Subject: [PATCH] Initial commit --- ARIMA111.m | 13 + DataImpute.m | 51 ++ EOF.m | 74 +++ ICA_jade.m | 295 +++++++++++ aboxplot.m | 213 ++++++++ acf.m | 112 ++++ acf_mtrx.m | 19 + addaxes.m | 660 ++++++++++++++++++++++++ agg_glbwrf.m | 57 +++ area_wghts.m | 83 +++ blank.m | 35 ++ boxplot_data.m | 68 +++ catchmat2cell.m | 62 +++ cca.m | 34 ++ cca_ana.m | 432 ++++++++++++++++ cdiffcell.m | 103 ++++ cdiffts.m | 29 ++ cell2catchmat.m | 100 ++++ cell2netcdf.m | 122 +++++ cell2netcdf2.m | 57 +++ cellmovav.m | 61 +++ cells2database.m | 76 +++ celltrnd.m | 71 +++ center_ts.m | 103 ++++ centerts.m | 23 + central_diff.m | 83 +++ central_diff2d.m | 31 ++ chkfrmt.m | 141 +++++ ciplot.m | 38 ++ colorgrad.m | 53 ++ comp_cont_corr.m | 133 +++++ comp_cont_quant.m | 193 +++++++ comp_euro_prec.m | 190 +++++++ comp_euro_t2.m | 185 +++++++ comp_glob_corr.m | 121 +++++ comp_glob_prec.m | 543 ++++++++++++++++++++ comp_glob_quant.m | 189 +++++++ comp_glob_t2.m | 121 +++++ comp_nrgauges.m | 42 ++ comp_spat_corr.m | 130 +++++ comp_spat_mean.m | 213 ++++++++ comp_tspat_corr.m | 83 +++ comp_waterbalance.m | 183 +++++++ comp_zon_ann_prec.m | 101 ++++ comp_zon_prec.m | 160 ++++++ comp_zon_quant.m | 238 +++++++++ compare_sp.m | 259 ++++++++++ compute_zonal_prec_glob.m | 34 ++ condcopula.m | 11 + contourplots.m | 348 +++++++++++++ copulafun.m | 11 + correct_corr.m | 14 + correlate.m | 128 +++++ corrmaps.m | 24 + corrmaps_eof.m | 56 ++ cov2corr.m | 12 + covloc5thorder.m | 45 ++ crt_corr_ts.m | 529 +++++++++++++++++++ crt_eof_ts.m | 24 + crt_euro_ts.m | 406 +++++++++++++++ crt_glob_ts.m | 54 ++ crt_mnth_wb.m | 186 +++++++ crt_mnthly_taylor.m | 151 ++++++ crt_prec_ts_gmt.m | 248 +++++++++ crt_seasonal_taylor.m | 72 +++ crt_seasonal_ts.m | 578 +++++++++++++++++++++ crt_t2_ts.m | 110 ++++ crt_taylor.m | 82 +++ crt_tqv_ts.m | 30 ++ crt_ts.m | 63 +++ crt_tspat_corr.m | 34 ++ crt_zon_conts.m | 89 ++++ crt_zonal_plts.m | 265 ++++++++++ cs2sc.m | 40 ++ ctch_corr.m | 7 + cubft2mm.m | 25 + daysinmonth.m | 25 + degvar.m | 90 ++++ doiceana.m | 110 ++++ doy2date.m | 27 + dtevec.m | 91 ++++ ecdfbiv.m | 30 ++ ecopula.m | 24 + efk_example.m | 117 +++++ emp_cdf.m | 41 ++ emp_ks_tst.m | 85 +++ empcopula.m | 28 + emprand.m | 105 ++++ ens_bounds.m | 12 + eof.m | 158 ++++++ eof_ana.m | 540 +++++++++++++++++++ eof_new.m | 73 +++ eof_simple.m | 29 ++ errperf.m | 258 ++++++++++ evalcndcopula.m | 16 + fftspec.m | 58 +++ fill_ts.m | 45 ++ fillts.m | 63 +++ find_sim_tspts.m | 55 ++ findindx.m | 11 + findtstps.m | 126 +++++ findtstps_cell.m | 102 ++++ findtstps_old.m | 176 +++++++ findtstps_ts.m | 113 ++++ fitline.m | 26 + fixepsbbox.m | 47 ++ fld2gis.m | 43 ++ flx2div.m | 56 ++ flx2vimfd.m | 109 ++++ gen_mask.m | 82 +++ geodweekcomp.m | 163 ++++++ glob_corr.m | 55 ++ glob_prec_corr.m | 402 +++++++++++++++ glob_stats.m | 48 ++ grace_cov_mean.m | 71 +++ grace_pre.m | 64 +++ grib2netcdf.m | 105 ++++ grid2gmt.m | 51 ++ gridfit.m | 1025 +++++++++++++++++++++++++++++++++++++ harm_ana.m | 80 +++ haversine.m | 10 + inregion.m | 149 ++++++ iqrcell.m | 15 + iqrts.m | 12 + isint.m | 20 + isscal.m | 9 + isvec.m | 10 + jadeR.m | 483 +++++++++++++++++ jbtest.m | 32 ++ kalman_falling_body.m | 167 ++++++ kendallstau.m | 53 ++ kstest_matlab.m | 178 +++++++ lagged_matrix.m | 28 + latmn.m | 13 + ldlt.m | 49 ++ lm_sort.m | 69 +++ load_data.m | 42 ++ lsft.m | 67 +++ make_movie.m | 60 +++ mat2netcdf.m | 39 ++ mat2vec.m | 40 ++ matrix_corr.m | 92 
++++ matrixcorr.m | 14 + mc_lags.m | 23 + mergets.m | 66 +++ mmmnth2mmday.m | 85 +++ mmstommmonth.m | 23 + mnth_yr_vec.m | 29 ++ mnthnms.m | 56 ++ mod01.m | 205 ++++++++ mtit.m | 164 ++++++ mtrxerrs.m | 122 +++++ nancorr.m | 24 + nanrmse.m | 22 + netcdf2mat.m | 97 ++++ ns_coeff.m | 25 + outlierdect.m | 18 + pdfplot.m | 40 ++ plfit.m | 92 ++++ plot_taylor.m | 80 +++ plot_taylor_t2.m | 61 +++ ploteofts.m | 29 ++ plotglbl.m | 33 ++ plotglblts.m | 29 ++ ploticas.m | 57 +++ plotrgnl.m | 118 +++++ plotspec.m | 47 ++ plwmtrx.m | 76 +++ prep_cfsr_data.m | 41 ++ prep_data.m | 11 + prep_grace.m | 128 +++++ prep_grace_cov.m | 67 +++ quartile.m | 42 ++ random_kalman.m | 42 ++ read_cfsr.m | 55 ++ read_cpc.m | 44 ++ read_del.m | 62 +++ read_ecmwf.m | 26 + read_fluxes.m | 235 +++++++++ read_gleam.m | 58 +++ read_gpcc.m | 52 ++ read_gpcp.m | 46 ++ read_ism.m | 401 +++++++++++++++ read_merra.m | 66 +++ read_netcdf.m | 20 + read_ssmis_averaged_v7.m | 62 +++ read_tmi_averaged_v4.m | 62 +++ read_usgs.m | 12 + read_wrf_prec.m | 25 + recon_eeofs.m | 24 + reconeof.m | 24 + reg_flt.m | 269 ++++++++++ reg_stats.m | 32 ++ regrid.m | 71 +++ remmn.m | 18 + remmnthmn.m | 18 + remsc.m | 26 + rotateticklabel.m | 58 +++ sampleacf.m | 16 + satboxplot.m | 58 +++ sc2cs.m | 24 + scalecov.m | 46 ++ scatter_line.m | 53 ++ showeofana.m | 17 + signal_map.m | 27 + sortem.m | 31 ++ spat_agg.m | 55 ++ spat_agg_corr.m | 84 +++ spat_mean.m | 81 +++ spataggmn.m | 189 +++++++ spataggmn_flist.m | 41 ++ spataggmn_new.m | 195 +++++++ spataggmn_old.m | 178 +++++++ spatcoor.m | 46 ++ spatmn.m | 199 +++++++ spher2cart.m | 17 + spherdist.m | 34 ++ ssa.m | 96 ++++ sub_mac.m | 14 + svd_ana.m | 177 +++++++ t_test_corr.m | 15 + tavgflds.m | 12 + taylor_stats.m | 38 ++ taylor_stats_2d.m | 113 ++++ taylor_stats_cont.m | 52 ++ taylordiag.m | 530 +++++++++++++++++++ taylordiag_new.m | 612 ++++++++++++++++++++++ taylordiag_test.m | 98 ++++ testforwhitenoise.m | 40 ++ trajectory_kalman1.m | 115 +++++ trajectory_kalman2.m | 117 +++++ trajectory_ls.m | 112 ++++ trend.m | 17 + ts2gmt.m | 66 +++ ts2netcdf.m | 52 ++ tsbias.m | 19 + tseval.m | 209 ++++++++ tsinterp.m | 34 ++ tskalman.m | 390 ++++++++++++++ tskinvsq.m | 86 ++++ tsmean.m | 134 +++++ tsmovav.m | 14 + tsplot.m | 101 ++++ varimax.m | 92 ++++ varimax2.m | 76 +++ vec_curtosis.m | 23 + vec_moments.m | 34 ++ veccorr.m | 1 + vimfd_r.m | 47 ++ vincenty.m | 93 ++++ water_budget.m | 26 + wcdiff.m | 50 ++ wcdiffs_cell.m | 37 ++ wdiffs_cell.m | 32 ++ whitening.m | 8 + wmnpxl.m | 72 +++ wrf2glbl.m | 65 +++ xy2gmt.m | 12 + yrlplt.m | 11 + zblank.m | 40 ++ zgrid.m | 175 +++++++ zonal_contour_plot.m | 45 ++ ztransform.m | 23 + 263 files changed, 26079 insertions(+) create mode 100644 ARIMA111.m create mode 100644 DataImpute.m create mode 100644 EOF.m create mode 100644 ICA_jade.m create mode 100644 aboxplot.m create mode 100644 acf.m create mode 100644 acf_mtrx.m create mode 100644 addaxes.m create mode 100644 agg_glbwrf.m create mode 100644 area_wghts.m create mode 100644 blank.m create mode 100644 boxplot_data.m create mode 100644 catchmat2cell.m create mode 100644 cca.m create mode 100644 cca_ana.m create mode 100644 cdiffcell.m create mode 100644 cdiffts.m create mode 100644 cell2catchmat.m create mode 100644 cell2netcdf.m create mode 100644 cell2netcdf2.m create mode 100644 cellmovav.m create mode 100644 cells2database.m create mode 100644 celltrnd.m create mode 100644 center_ts.m create mode 100644 centerts.m create mode 100644 central_diff.m create mode 100644 central_diff2d.m create mode 100755 
chkfrmt.m create mode 100644 ciplot.m create mode 100644 colorgrad.m create mode 100644 comp_cont_corr.m create mode 100644 comp_cont_quant.m create mode 100644 comp_euro_prec.m create mode 100644 comp_euro_t2.m create mode 100644 comp_glob_corr.m create mode 100644 comp_glob_prec.m create mode 100644 comp_glob_quant.m create mode 100644 comp_glob_t2.m create mode 100644 comp_nrgauges.m create mode 100644 comp_spat_corr.m create mode 100644 comp_spat_mean.m create mode 100644 comp_tspat_corr.m create mode 100644 comp_waterbalance.m create mode 100644 comp_zon_ann_prec.m create mode 100644 comp_zon_prec.m create mode 100644 comp_zon_quant.m create mode 100644 compare_sp.m create mode 100644 compute_zonal_prec_glob.m create mode 100644 condcopula.m create mode 100644 contourplots.m create mode 100644 copulafun.m create mode 100644 correct_corr.m create mode 100644 correlate.m create mode 100644 corrmaps.m create mode 100644 corrmaps_eof.m create mode 100644 cov2corr.m create mode 100644 covloc5thorder.m create mode 100644 crt_corr_ts.m create mode 100644 crt_eof_ts.m create mode 100644 crt_euro_ts.m create mode 100644 crt_glob_ts.m create mode 100644 crt_mnth_wb.m create mode 100644 crt_mnthly_taylor.m create mode 100644 crt_prec_ts_gmt.m create mode 100644 crt_seasonal_taylor.m create mode 100644 crt_seasonal_ts.m create mode 100644 crt_t2_ts.m create mode 100644 crt_taylor.m create mode 100644 crt_tqv_ts.m create mode 100644 crt_ts.m create mode 100644 crt_tspat_corr.m create mode 100644 crt_zon_conts.m create mode 100644 crt_zonal_plts.m create mode 100755 cs2sc.m create mode 100644 ctch_corr.m create mode 100644 cubft2mm.m create mode 100644 daysinmonth.m create mode 100644 degvar.m create mode 100644 doiceana.m create mode 100644 doy2date.m create mode 100644 dtevec.m create mode 100644 ecdfbiv.m create mode 100644 ecopula.m create mode 100644 efk_example.m create mode 100644 emp_cdf.m create mode 100644 emp_ks_tst.m create mode 100644 empcopula.m create mode 100644 emprand.m create mode 100644 ens_bounds.m create mode 100644 eof.m create mode 100644 eof_ana.m create mode 100644 eof_new.m create mode 100644 eof_simple.m create mode 100644 errperf.m create mode 100644 evalcndcopula.m create mode 100644 fftspec.m create mode 100644 fill_ts.m create mode 100644 fillts.m create mode 100644 find_sim_tspts.m create mode 100644 findindx.m create mode 100644 findtstps.m create mode 100644 findtstps_cell.m create mode 100644 findtstps_old.m create mode 100644 findtstps_ts.m create mode 100644 fitline.m create mode 100644 fixepsbbox.m create mode 100644 fld2gis.m create mode 100644 flx2div.m create mode 100644 flx2vimfd.m create mode 100644 gen_mask.m create mode 100644 geodweekcomp.m create mode 100644 glob_corr.m create mode 100644 glob_prec_corr.m create mode 100644 glob_stats.m create mode 100644 grace_cov_mean.m create mode 100644 grace_pre.m create mode 100644 grib2netcdf.m create mode 100644 grid2gmt.m create mode 100644 gridfit.m create mode 100644 harm_ana.m create mode 100644 haversine.m create mode 100644 inregion.m create mode 100644 iqrcell.m create mode 100644 iqrts.m create mode 100644 isint.m create mode 100644 isscal.m create mode 100644 isvec.m create mode 100644 jadeR.m create mode 100644 jbtest.m create mode 100644 kalman_falling_body.m create mode 100644 kendallstau.m create mode 100644 kstest_matlab.m create mode 100644 lagged_matrix.m create mode 100644 latmn.m create mode 100644 ldlt.m create mode 100755 lm_sort.m create mode 100644 load_data.m create mode 100644 lsft.m 
create mode 100644 make_movie.m create mode 100644 mat2netcdf.m create mode 100755 mat2vec.m create mode 100644 matrix_corr.m create mode 100644 matrixcorr.m create mode 100644 mc_lags.m create mode 100644 mergets.m create mode 100644 mmmnth2mmday.m create mode 100644 mmstommmonth.m create mode 100644 mnth_yr_vec.m create mode 100644 mnthnms.m create mode 100644 mod01.m create mode 100644 mtit.m create mode 100644 mtrxerrs.m create mode 100644 nancorr.m create mode 100644 nanrmse.m create mode 100755 netcdf2mat.m create mode 100644 ns_coeff.m create mode 100644 outlierdect.m create mode 100644 pdfplot.m create mode 100644 plfit.m create mode 100644 plot_taylor.m create mode 100644 plot_taylor_t2.m create mode 100644 ploteofts.m create mode 100644 plotglbl.m create mode 100644 plotglblts.m create mode 100644 ploticas.m create mode 100644 plotrgnl.m create mode 100644 plotspec.m create mode 100644 plwmtrx.m create mode 100644 prep_cfsr_data.m create mode 100644 prep_data.m create mode 100644 prep_grace.m create mode 100644 prep_grace_cov.m create mode 100644 quartile.m create mode 100644 random_kalman.m create mode 100644 read_cfsr.m create mode 100644 read_cpc.m create mode 100644 read_del.m create mode 100644 read_ecmwf.m create mode 100644 read_fluxes.m create mode 100644 read_gleam.m create mode 100644 read_gpcc.m create mode 100644 read_gpcp.m create mode 100644 read_ism.m create mode 100644 read_merra.m create mode 100644 read_netcdf.m create mode 100644 read_ssmis_averaged_v7.m create mode 100644 read_tmi_averaged_v4.m create mode 100644 read_usgs.m create mode 100644 read_wrf_prec.m create mode 100644 recon_eeofs.m create mode 100644 reconeof.m create mode 100644 reg_flt.m create mode 100644 reg_stats.m create mode 100644 regrid.m create mode 100644 remmn.m create mode 100644 remmnthmn.m create mode 100644 remsc.m create mode 100644 rotateticklabel.m create mode 100644 sampleacf.m create mode 100644 satboxplot.m create mode 100755 sc2cs.m create mode 100644 scalecov.m create mode 100644 scatter_line.m create mode 100644 showeofana.m create mode 100644 signal_map.m create mode 100644 sortem.m create mode 100644 spat_agg.m create mode 100644 spat_agg_corr.m create mode 100644 spat_mean.m create mode 100644 spataggmn.m create mode 100644 spataggmn_flist.m create mode 100644 spataggmn_new.m create mode 100644 spataggmn_old.m create mode 100644 spatcoor.m create mode 100644 spatmn.m create mode 100644 spher2cart.m create mode 100644 spherdist.m create mode 100644 ssa.m create mode 100644 sub_mac.m create mode 100644 svd_ana.m create mode 100644 t_test_corr.m create mode 100644 tavgflds.m create mode 100644 taylor_stats.m create mode 100644 taylor_stats_2d.m create mode 100644 taylor_stats_cont.m create mode 100644 taylordiag.m create mode 100644 taylordiag_new.m create mode 100644 taylordiag_test.m create mode 100644 testforwhitenoise.m create mode 100644 trajectory_kalman1.m create mode 100644 trajectory_kalman2.m create mode 100644 trajectory_ls.m create mode 100644 trend.m create mode 100644 ts2gmt.m create mode 100644 ts2netcdf.m create mode 100644 tsbias.m create mode 100644 tseval.m create mode 100644 tsinterp.m create mode 100644 tskalman.m create mode 100644 tskinvsq.m create mode 100644 tsmean.m create mode 100644 tsmovav.m create mode 100644 tsplot.m create mode 100644 varimax.m create mode 100644 varimax2.m create mode 100644 vec_curtosis.m create mode 100644 vec_moments.m create mode 120000 veccorr.m create mode 100644 vimfd_r.m create mode 100644 vincenty.m create mode 100644 
water_budget.m
 create mode 100644 wcdiff.m
 create mode 100644 wcdiffs_cell.m
 create mode 100644 wdiffs_cell.m
 create mode 100644 whitening.m
 create mode 100644 wmnpxl.m
 create mode 100644 wrf2glbl.m
 create mode 100644 xy2gmt.m
 create mode 100644 yrlplt.m
 create mode 100644 zblank.m
 create mode 100644 zgrid.m
 create mode 100644 zonal_contour_plot.m
 create mode 100644 ztransform.m

diff --git a/ARIMA111.m b/ARIMA111.m
new file mode 100644
index 0000000..9617815
--- /dev/null
+++ b/ARIMA111.m
@@ -0,0 +1,13 @@
+function pred = ARIMA111(y)
+    predictions=1;
+    p=1;
+    q=1;
+    yy = y(1);
+    y = diff(y);
+    Spec = garchset('R',p,'M',q,'C',NaN,'VarianceModel','Constant');
+    [EstSpec,EstSE] = garchfit(Spec,y);
+    [sigmaForecast,meanForecast] = garchpred(EstSpec,y,predictions);
+    pred = cumsum([yy; y; meanForecast]);
+    % keyboard
+    pred = pred(end-predictions+1:end);
+end
\ No newline at end of file
diff --git a/DataImpute.m b/DataImpute.m
new file mode 100644
index 0000000..b38de68
--- /dev/null
+++ b/DataImpute.m
@@ -0,0 +1,51 @@
+function [Imputed_data]=DataImpute(Data,method)
+
+% method: 1 : monthly mean
+%         2 : interpolation
+
+% Data format: first column  : year
+%              Second column : month
+%              Values start from the third column (no limit on the number of columns)
+
+[row col]=size(Data);
+Imputed_data=Data;
+
+%% Imputing the mean monthly values to NaN values
+if method==1
+    f1=find(Data(:,2)==1);f2=find(Data(:,2)==2);
+    f3=find(Data(:,2)==3);f4=find(Data(:,2)==4);
+    f5=find(Data(:,2)==5);f6=find(Data(:,2)==6);
+    f7=find(Data(:,2)==7);f8=find(Data(:,2)==8);
+    f9=find(Data(:,2)==9);f10=find(Data(:,2)==10);
+    f11=find(Data(:,2)==11);f12=find(Data(:,2)==12);
+    Jan1=zeros(1,col-2); Feb1=zeros(1,col-2); Mar1=zeros(1,col-2);
+    Apr1=zeros(1,col-2); May1=zeros(1,col-2); Jun1=zeros(1,col-2);
+    Jul1=zeros(1,col-2); Aug1=zeros(1,col-2); Sep1=zeros(1,col-2);
+    Oct1=zeros(1,col-2); Nov1=zeros(1,col-2); Dec1=zeros(1,col-2);
+    M_Data=zeros(12,col-2);
+
+    for i=3:col
+        Jan1(1,i-2)=mean(Data(f1(find(isnan(Data(f1,i))==0)),i));Feb1(1,i-2)=mean(Data(f2(find(isnan(Data(f2,i))==0)),i));
+        Mar1(1,i-2)=mean(Data(f3(find(isnan(Data(f3,i))==0)),i));Apr1(1,i-2)=mean(Data(f4(find(isnan(Data(f4,i))==0)),i));
+        May1(1,i-2)=mean(Data(f5(find(isnan(Data(f5,i))==0)),i));Jun1(1,i-2)=mean(Data(f6(find(isnan(Data(f6,i))==0)),i));
+        Jul1(1,i-2)=mean(Data(f7(find(isnan(Data(f7,i))==0)),i));Aug1(1,i-2)=mean(Data(f8(find(isnan(Data(f8,i))==0)),i));
+        Sep1(1,i-2)=mean(Data(f9(find(isnan(Data(f9,i))==0)),i));Oct1(1,i-2)=mean(Data(f10(find(isnan(Data(f10,i))==0)),i));
+        Nov1(1,i-2)=mean(Data(f11(find(isnan(Data(f11,i))==0)),i));Dec1(1,i-2)=mean(Data(f12(find(isnan(Data(f12,i))==0)),i));
+    end
+
+    M_Data=[Jan1;Feb1;Mar1;Apr1;May1;Jun1;Jul1;Aug1;Sep1;Oct1;Nov1;Dec1];
+
+
+    [m,n]=find(isnan(Data)==1);
+    for i=1:length(m)
+        Imputed_data(m(i),n(i))=M_Data(Data(m(i),2),n(i)-2);
+    end
+end
+
+%% Interpolation
+if method==2
+    [m n]=find(isnan(Data)==1);
+    for i=1:length(m)
+        Imputed_data(m(i),n(i))=(Data(m(i)-1,n(i))+Data(m(i)+1,n(i)))/2;
+    end
+end
\ No newline at end of file
diff --git a/EOF.m b/EOF.m
new file mode 100644
index 0000000..1dc38f4
--- /dev/null
+++ b/EOF.m
@@ -0,0 +1,74 @@
+function [L, EOFs, EC, error, norms] = EOF( U, n, norm, varargin )
+% EOF - computes EOF of a matrix.
+%
+% Usage: [L, EOFs, EC, error, norms] = EOF( M, num, norm, ... )
+%
+% M is the matrix on which to perform the EOF. num is the number of EOFs to
+% return. If num='all', then all EOFs are returned. This is the default.
+% +% If norm is true, then all time series are normalized by their standard +% deviation before EOFs are computed. Default is false. In this case, +% the fifth output argument will be the standard deviations of each column. +% +% ... are extra arguments to be given to the svds function. These will +% be ignored in the case that all EOFs are to be returned, in which case +% the svd function is used instead. Use these with care. +% +% Data is not detrended before handling. Use the detrend function to fix +% that. +% +% L are the eigenvalues of the covariance matrix ( ie. they are normalized +% by 1/(m-1), where m is the number of rows ). EC are the expansion +% coefficients (PCs in other terminology) and error is the reconstruction +% error (L2-norm). +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +% $Id: EOF.m,v 1.3 2003/06/01 22:20:23 dmk Exp $ +% +% Copyright (C) 2001 David M. Kaplan +% Licence: GPL (Gnu Public License) +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +if nargin < 2 + n = 'all'; +end + +if nargin < 3 + norm = 0==1; +end + +s = size(U); +ss = min(s); + +% Normalize by standard deviation if desired. +if norm + norms = std(U); +else + norms = ones([1,s(2)]); +end +U = U * diag(1./norms); + +% Do SVD +if (ischar(n) & n == 'all') | n >= ss + % Use svd in case we want all EOFs - quicker. + [ C, lambda, EOFs ] = svd( full(U) ); +else + % Otherwise use svds. + [ C, lambda, EOFs, flag ] = svds( U, n, varargin{:} ); + + if flag % Case where things did not converge - probably an error. + warning( 'HFRC_utility - Eigenvalues did not seem to converge!!!' ); + end + +end + +% Compute EC's and L +EC = C * lambda; % Expansion coefficients. +L = diag( lambda ) .^ 2 / (s(1)-1); % eigenvalues. + +% Compute error. +diff=(U-EC*EOFs'); +error=sqrt( sum( diff .* conj(diff) ) ); + diff --git a/ICA_jade.m b/ICA_jade.m new file mode 100644 index 0000000..6ce788d --- /dev/null +++ b/ICA_jade.m @@ -0,0 +1,295 @@ +function B = ICA_jade(X, B) +verbose = 1 +[m, T] = size(X); + + +%% Reshaping of the data, hoping to speed up things a little bit... +X = X'; + +dimsymm = (m*(m+1))/2; % Dim. of the space of real symm matrices +nbcm = dimsymm ; % number of cumulant matrices +CM = zeros(m,m*nbcm); % Storage for cumulant matrices +R = eye(m); %% +Qij = zeros(m); % Temp for a cum. matrix +Xim = zeros(m,1); % Temp +Xijm = zeros(m,1); % Temp +Uns = ones(1,m); % for convenience + + +%% I am using a symmetry trick to save storage. I should write a short note one of these +%% days explaining what is going on here. +%% +Range = 1:m ; % will index the columns of CM where to store the cum. mats. + +for im = 1:m + Xim = X(:,im) ; + Xijm= Xim.*Xim ; + %% Note to myself: the -R on next line can be removed: it does not affect + %% the joint diagonalization criterion + Qij = ((Xijm(:,Uns).*X)' * X)/T - R - 2 * R(:,im)*R(:,im)' ; + CM(:,Range) = Qij ; + Range = Range + m ; + for jm = 1:im-1 + Xijm = Xim.*X(:,jm) ; + Qij = sqrt(2) *(((Xijm(:,Uns).*X)' * X)/T - R(:,im)*R(:,jm)' - R(:,jm)*R(:,im)') ; + CM(:,Range) = Qij ; + Range = Range + m ; + end ; +end; +%%%% Now we have nbcm = m(m+1)/2 cumulants matrices stored in a big m x m*nbcm array. + + + +%%% joint diagonalization of the cumulant matrices +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + +%% Init +if 0, %% Init by diagonalizing a *single* cumulant matrix. It seems to save + %% some computation time `sometimes'. 
Not clear if initialization is really worth + %% it since Jacobi rotations are very efficient. On the other hand, it does not + %% cost much... + + fprintf('jade -> Initialization of the diagonalization\n'); + [V,D] = eig(CM(:,1:m)); % Selectng a particular cumulant matrix. + for u=1:m:m*nbcm, % Accordingly updating the cumulant set given the init + CM(:,u:u+m-1) = CM(:,u:u+m-1)*V ; + end; + CM = V'*CM; + +else, %% The dont-try-to-be-smart init + V = eye(m) ; % la rotation initiale +end; + +%% Computing the initial value of the contrast +Diag = zeros(m,1) ; +On = 0 ; +Range = 1:m ; +for im = 1:nbcm, + Diag = diag(CM(:,Range)) ; + On = On + sum(Diag.*Diag) ; + Range = Range + m ; +end +Off = sum(sum(CM.*CM)) - On ; + + + +seuil = 1.0e-6 / sqrt(T) ; % A statistically scaled threshold on `small' angles +encore = 1; +sweep = 0; % sweep number +updates = 0; % Total number of rotations +upds = 0; % Number of rotations in a given seep +g = zeros(2,nbcm); +gg = zeros(2,2); +G = zeros(2,2); +c = 0 ; +s = 0 ; +ton = 0 ; +toff = 0 ; +theta = 0 ; +Gain = 0 ; + +%% Joint diagonalization proper +if verbose, fprintf('jade -> Contrast optimization by joint diagonalization\n'); end + +while encore, encore=0; + + if verbose, fprintf('jade -> Sweep #%3d',sweep); end + sweep = sweep+1; + upds = 0 ; + Vkeep = V ; + + for p=1:m-1, + for q=p+1:m, + + Ip = p:m:m*nbcm ; + Iq = q:m:m*nbcm ; + + %%% computation of Givens angle + g = [ CM(p,Ip)-CM(q,Iq) ; CM(p,Iq)+CM(q,Ip) ]; + gg = g*g'; + ton = gg(1,1)-gg(2,2); + toff = gg(1,2)+gg(2,1); + theta = 0.5*atan2( toff , ton+sqrt(ton*ton+toff*toff) ); + Gain = (sqrt(ton*ton+toff*toff) - ton) / 4 ; + + %%% Givens update + if abs(theta) > seuil, +%% if Gain > 1.0e-3*On/m/m , + encore = 1 ; + upds = upds + 1; + c = cos(theta); + s = sin(theta); + G = [ c -s ; s c ] ; + + pair = [p;q] ; + V(:,pair) = V(:,pair)*G ; + CM(pair,:) = G' * CM(pair,:) ; + CM(:,[Ip Iq]) = [ c*CM(:,Ip)+s*CM(:,Iq) -s*CM(:,Ip)+c*CM(:,Iq) ] ; + + + On = On + Gain; + Off = Off - Gain; + + %% fprintf('jade -> %3d %3d %12.8f\n',p,q,Off/On); + end%%of the if + end%%of the loop on q + end%%of the loop on p + if verbose, fprintf(' completed in %d rotations\n',upds); end + updates = updates + upds ; + +end%%of the while loop +if verbose, fprintf('jade -> Total of %d Givens rotations\n',updates); end + + +%%% A separating matrix +% =================== +B = V'*B ; + + +%%% Permut the rows of the separating matrix B to get the most energetic components first. +%%% Here the **signals** are normalized to unit variance. Therefore, the sort is +%%% according to the norm of the columns of A = pinv(B) + +if verbose, fprintf('jade -> Sorting the components\n',updates); end +A = pinv(B) ; +[Ds,keys] = sort(sum(A.*A)) ; +B = B(keys,:) ; +B = B(m:-1:1,:) ; % Is this smart ? + + +% Signs are fixed by forcing the first column of B to have non-negative entries. + +if verbose, fprintf('jade -> Fixing the signs\n',updates); end +b = B(:,1) ; +signs = sign(sign(b)+0.1) ; % just a trick to deal with sign=0 +B = diag(signs)*B ; + + + +return ; + + +% To do. +% - Implement a cheaper/simpler whitening (is it worth it?) +% +% Revision history: +% +%- V1.8, May 2005 +% - Added some commented code to explain the cumulant computation tricks. +% - Added reference to the Neural Comp. paper. +% +%- V1.7, Nov. 16, 2002 +% - Reverted the mean removal code to an earlier version (not using +% repmat) to keep the code octave-compatible. Now less efficient, +% but does not make any significant difference wrt the total +% computing cost. 
+% - Remove some cruft (some debugging figures were created. What +% was this stuff doing there???) +% +% +%- V1.6, Feb. 24, 1997 +% - Mean removal is better implemented. +% - Transposing X before computing the cumulants: small speed-up +% - Still more comments to emphasize the relationship to PCA +% +%- V1.5, Dec. 24 1997 +% - The sign of each row of B is determined by letting the first element be positive. +% +%- V1.4, Dec. 23 1997 +% - Minor clean up. +% - Added a verbose switch +% - Added the sorting of the rows of B in order to fix in some reasonable way the +% permutation indetermination. See note 2) below. +% +%- V1.3, Nov. 2 1997 +% - Some clean up. Released in the public domain. +% +%- V1.2, Oct. 5 1997 +% - Changed random picking of the cumulant matrix used for initialization to a +% deterministic choice. This is not because of a better rationale but to make the +% ouput (almost surely) deterministic. +% - Rewrote the joint diag. to take more advantage of Matlab's tricks. +% - Created more dummy variables to combat Matlab's loose memory management. +% +%- V1.1, Oct. 29 1997. +% Made the estimation of the cumulant matrices more regular. This also corrects a +% buglet... +% +%- V1.0, Sept. 9 1997. Created. +% +% Main references: +% @article{CS-iee-94, +% title = "Blind beamforming for non {G}aussian signals", +% author = "Jean-Fran\c{c}ois Cardoso and Antoine Souloumiac", +% HTML = "ftp://sig.enst.fr/pub/jfc/Papers/iee.ps.gz", +% journal = "IEE Proceedings-F", +% month = dec, number = 6, pages = {362-370}, volume = 140, year = 1993} +% +% +%@article{JADE:NC, +% author = "Jean-Fran\c{c}ois Cardoso", +% journal = "Neural Computation", +% title = "High-order contrasts for independent component analysis", +% HTML = "http://www.tsi.enst.fr/~cardoso/Papers.PS/neuralcomp_2ppf.ps", +% year = 1999, month = jan, volume = 11, number = 1, pages = "157-192"} +% +% +% +% +% Notes: +% ====== +% +% Note 1) The original Jade algorithm/code deals with complex signals in Gaussian noise +% white and exploits an underlying assumption that the model of independent components +% actually holds. This is a reasonable assumption when dealing with some narrowband +% signals. In this context, one may i) seriously consider dealing precisely with the +% noise in the whitening process and ii) expect to use the small number of significant +% eigenmatrices to efficiently summarize all the 4th-order information. All this is done +% in the JADE algorithm. +% +% In *this* implementation, we deal with real-valued signals and we do NOT expect the ICA +% model to hold exactly. Therefore, it is pointless to try to deal precisely with the +% additive noise and it is very unlikely that the cumulant tensor can be accurately +% summarized by its first n eigen-matrices. Therefore, we consider the joint +% diagonalization of the *whole* set of eigen-matrices. However, in such a case, it is +% not necessary to compute the eigenmatrices at all because one may equivalently use +% `parallel slices' of the cumulant tensor. This part (computing the eigen-matrices) of +% the computation can be saved: it suffices to jointly diagonalize a set of cumulant +% matrices. Also, since we are dealing with reals signals, it becomes easier to exploit +% the symmetries of the cumulants to further reduce the number of matrices to be +% diagonalized. These considerations, together with other cheap tricks lead to this +% version of JADE which is optimized (again) to deal with real mixtures and to work +% `outside the model'. 
As the original JADE algorithm, it works by minimizing a `good +% set' of cumulants. +% +% +% Note 2) The rows of the separating matrix B are resorted in such a way that the columns +% of the corresponding mixing matrix A=pinv(B) are in decreasing order of (Euclidian) +% norm. This is a simple, `almost canonical' way of fixing the indetermination of +% permutation. It has the effect that the first rows of the recovered signals (ie the +% first rows of B*X) correspond to the most energetic *components*. Recall however that +% the source signals in S=B*X have unit variance. Therefore, when we say that the +% observations are unmixed in order of decreasing energy, this energetic signature is to +% be found as the norm of the columns of A=pinv(B) and not as the variances of the +% separated source signals. +% +% +% Note 3) In experiments where JADE is run as B=jadeR(X,m) with m varying in range of +% values, it is nice to be able to test the stability of the decomposition. In order to +% help in such a test, the rows of B can be sorted as described above. We have also +% decided to fix the sign of each row in some arbitrary but fixed way. The convention is +% that the first element of each row of B is positive. +% +% +% Note 4) Contrary to many other ICA algorithms, JADE (or least this version) does not +% operate on the data themselves but on a statistic (the full set of 4th order cumulant). +% This is represented by the matrix CM below, whose size grows as m^2 x m^2 where m is +% the number of sources to be extracted (m could be much smaller than n). As a +% consequence, (this version of) JADE will probably choke on a `large' number of sources. +% Here `large' depends mainly on the available memory and could be something like 40 or +% so. One of these days, I will prepare a version of JADE taking the `data' option +% rather than the `statistic' option. + + +% JadeR.m ends here. \ No newline at end of file diff --git a/aboxplot.m b/aboxplot.m new file mode 100644 index 0000000..e87b869 --- /dev/null +++ b/aboxplot.m @@ -0,0 +1,213 @@ +% +% Copyright (C) 2011-2012 Alex Bikfalvi +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 3 of the License, or (at +% your option) any later version. + +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. + +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+% + +function aboxplot(X,varargin) + +% Parameters +widthl = 0.7; +widths = 0.8; +widthe = 0.4; +outMarker = '.'; +outMarkerSize = 3; +outMarkerEdgeColor = [0.6 0.6 0.6]; +outMarkerFaceColor = [0.6 0.6 0.6]; +alpha = 0.05; +cmap = []; +colorrev = 0; +colorgrd = 'blue_down'; + +% Get the number or data matrices +if iscell(X) + d = length(X); +else + % If data is a matrix extend to a 3D array + if 2 == ndims(X) + X = reshape(X, [1,size(X)]); + end + d = size(X,1); +end; + +% Get the data size +if iscell(X) + n = size(X{1},2); +else + n = size(X,3); +end + +% Set the labels +labels = cell(n,1); +for i=1:n + labels{i} = num2str(i); +end + +% Optional arguments +optargin = size(varargin,2); + +i = 1; +while i <= optargin + switch lower(varargin{i}) + case 'labels' + labels = varargin{i+1}; + case 'colormap' + cmap = varargin{i+1}; + case 'colorgrad' + colorgrd = varargin{i+1}; + case 'colorrev' + colorrev = varargin{i+1}; + case 'outliermarker' + outMarker = varargin{i+1}; + case 'outliermarkersize' + outMarkerSize = varargin{i+1}; + case 'outliermarkeredgecolor' + outMarkerEdgeColor = varargin{i+1}; + case 'outliermarkerfacecolor' + outMarkerFaceColor = varargin{i+1}; + case 'widthl' + widthl = varargin{i+1}; + case 'widths' + widths = varargin{i+1}; + case 'widthe' + widthe = varargin{i+1}; + end + i = i + 2; +end + +% Colors +colors = cell(d,n); + +if colorrev + % Set colormap + if isempty(cmap) + cmap = colorgrad(n,colorgrd); + end + if size(cmap,1) ~= n + error('The number of colors in the colormap must equal n.'); + end + for j=1:d + for i=1:n + colors{j,i} = cmap(i,:); + end + end +else + % Set colormap + if isempty(cmap) + cmap = colorgrad(d,colorgrd); + end + if size(cmap,1) ~= d + error('The number of colors in the colormap must equal n.'); + end + for j=1:d + for i=1:n + colors{j,i} = cmap(j,:); + end + end +end + +xlim([0.5 n+0.5]); + +hgg = zeros(d,1); + +for j=1:d + % Get the j matrix + if iscell(X) + Y = X{j}; + else + Y = squeeze(X(j,:,:)); + end + + % Create a hggroup for each data set + hgg(j) = hggroup(); + set(get(get(hgg(j),'Annotation'),'LegendInformation'),'IconDisplayStyle','on'); + legendinfo(hgg(j),'patch',... + 'LineWidth',0.5,... + 'EdgeColor','k',... + 'FaceColor',colors{j,1},... + 'LineStyle','-',... + 'XData',[0 0 1 1 0],... + 'YData',[0 1 1 0 0]); + + for i=1:n + + % Calculate the mean and confidence intervals + [q1 q2 q3 fu fl ou ol] = quartile(Y(:,i)); + u = nanmean(Y(:,i)); + + % large interval [i - widthl/2 i + widthl/2] delta = widthl + % medium interval start: i - widthl/2 + (j-1) * widthl / d + % medium interval end: i - widthl/2 + j * widthl / d + % medium interval width: widthl / d + % medium interval middle: i-widthl/2+(2*j-1)*widthl/(2*d) + % small interval width: widths*widthl/d + % small interval start: i-widthl/2+(2*j-1-widths)*widthl/(2*d) + + % Plot outliers + hold on; + plot((i-widthl/2+(2*j-1)*widthl/(2*d)).*ones(size(ou)),ou,... + 'LineStyle','none',... + 'Marker',outMarker,... + 'MarkerSize',outMarkerSize,... + 'MarkerEdgeColor',outMarkerEdgeColor,... + 'MarkerFaceColor',outMarkerFaceColor,... + 'HitTest','off',... + 'Parent',hgg(j)); + plot((i-widthl/2+(2*j-1)*widthl/(2*d)).*ones(size(ol)),ol,... + 'LineStyle','none',... + 'Marker',outMarker,... + 'MarkerSize',outMarkerSize,... + 'MarkerEdgeColor',outMarkerEdgeColor,... + 'MarkerFaceColor',outMarkerFaceColor,... + 'HitTest','off',... + 'Parent',hgg(j)); + hold off; + + % Plot fence + line([i-widthl/2+(2*j-1)*widthl/(2*d) i-widthl/2+(2*j-1)*widthl/(2*d)],[fu fl],... 
+ 'Color','k','LineStyle',':','HitTest','off','Parent',hgg(j)); + line([i-widthl/2+(2*j-1-widthe)*widthl/(2*d) i-widthl/2+(2*j-1+widthe)*widthl/(2*d)],[fu fu],... + 'Color','k','HitTest','off','Parent',hgg(j)); + line([i-widthl/2+(2*j-1-widthe)*widthl/(2*d) i-widthl/2+(2*j-1+widthe)*widthl/(2*d)],[fl fl],... + 'Color','k','HitTest','off','Parent',hgg(j)); + + % Plot quantile + if q3 > q1 + rectangle('Position',[i-widthl/2+(2*j-1-widths)*widthl/(2*d) q1 widths*widthl/d q3-q1],... + 'EdgeColor','k','FaceColor',colors{j,i},'HitTest','off','Parent',hgg(j)); + end + + % Plot median + line([i-widthl/2+(2*j-1-widths)*widthl/(2*d) i-widthl/2+(2*j-1+widths)*widthl/(2*d)],[q2 q2],... + 'Color','k','LineWidth',1,'HitTest','off','Parent',hgg(j)); + + % Plot mean + hold on; + plot(i-widthl/2+(2*j-1)*widthl/(2*d), u,... + 'LineStyle','none',... + 'Marker','o',... + 'MarkerEdgeColor','k',... + 'MarkerFaceColor',colors{j,i},... + 'HitTest','off','Parent',hgg(j)); + hold off; + end +end + +box on; + +set(gca,'XTick',1:n); +set(gca,'XTickLabel',labels); + +end \ No newline at end of file diff --git a/acf.m b/acf.m new file mode 100644 index 0000000..5c802dd --- /dev/null +++ b/acf.m @@ -0,0 +1,112 @@ +function ta = acf(y,p) +% ACF - Compute Autocorrelations Through p Lags +% >> myacf = acf(y,p) +% +% Inputs: +% y - series to compute acf for, nx1 column vector +% p - total number of lags, 1x1 integer +% +% Output: +% myacf - px1 vector containing autocorrelations +% (First lag computed is lag 1. Lag 0 not computed) +% +% +% A bar graph of the autocorrelations is also produced, with +% rejection region bands for testing individual autocorrelations = 0. +% +% Note that lag 0 autocorelation is not computed, +% and is not shown on this graph. +% +% Example: +% >> acf(randn(100,1), 10) +% + + +% -------------------------- +% USER INPUT CHECKS +% -------------------------- + +[n1, n2] = size(y) ; +if n2 ~=1 + error('Input series y must be an nx1 column vector') +end + +[a1, a2] = size(p) ; +if ~((a1==1 & a2==1) & (p abs(bar_hi)) % if rejection lines might not appear on graph + axis([0 p+.60 line_lo line_hi]) +else + axis([0 p+.60 bar_lo bar_hi]) +end +title({' ','Sample Autocorrelations',' '}) +xlabel('Lag Length') +set(gca,'YTick',[-1:.20:1]) +% set number of lag labels shown +if (p<28 & p>4) + set(gca,'XTick',floor(linspace(1,p,4))) +elseif (p>=28) + set(gca,'XTick',floor(linspace(1,p,8))) +end +set(gca,'TickLength',[0 0]) + + + + +% --------------- +% SUB FUNCTION +% --------------- +function ta2 = acf_k(y,k) +% ACF_K - Autocorrelation at Lag k +% acf(y,k) +% +% Inputs: +% y - series to compute acf for +% k - which lag to compute acf +% +global ybar +global N +cross_sum = zeros(N-k,1) ; + +% Numerator, unscaled covariance +for i = (k+1):N + cross_sum(i) = (y(i)-ybar)*(y(i-k)-ybar) ; +end + +% Denominator, unscaled variance +yvar = (y-ybar)'*(y-ybar) ; + +ta2 = sum(cross_sum) / yvar ; + diff --git a/acf_mtrx.m b/acf_mtrx.m new file mode 100644 index 0000000..b27ceef --- /dev/null +++ b/acf_mtrx.m @@ -0,0 +1,19 @@ +function R = acf_mtrx(Z, l_max, cr_flg); + +% 0. +[n, p] = size(Z); + +% 1. Compute the mean +mn = mean(Z); + +% 2. 
Remove the mean +Z_c = Z - ones(n, 1)*mn; + +for l = 0:l_max + R(l+1, :) = 1/(n-l) * sum(Z_c(1:n-l,:).*Z_c(1+l:end,:)); +end + +if cr_flg == 1 + R = R./(ones(size(R, 1), 1)*var(Z_c)); +end + diff --git a/addaxes.m b/addaxes.m new file mode 100644 index 0000000..c270394 --- /dev/null +++ b/addaxes.m @@ -0,0 +1,660 @@ +function HA = addaxes(varargin) +%ADDAXES Adds a new linked axis related by any monotonic function. +% +% SYNTAX: +% addaxes(...,'PropertyName',PropertyValue) +% addaxes('off') +% addaxes(AX,...) +% HA = addaxes(...); +% +% INPUT: +% AX - Uses given axes handle instead of current one. +% DEFAULT: gca +% 'PN'/PV - Paired property/value inputs to define the new axes, at +% least one of the following (see NOTE below): +% +% --------------|---------------------|-------------------- +% 'NAME' VALUE DEFAULT +% --------------|---------------------|-------------------- +% 'XFunction' Function name or @(x)interp1(... +% or handle get(AX,'XLim'),... +% 'xfun' get(AX,'XLim'),... +% x,'linear',..., +% 'extrap') +% +% 'XInverse' Function name or Inverse function +% or handle of 'xfun' estimated +% 'xinv' with FZERO. +% +% 'XLegend' XLABEL legend '' (none) +% or +% 'xleg' +% +% 'XDate' true, false, 'tlabel' false (TLABEL not +% or or 'datetick' used) +% 'xdat' +% --------------|---------------------|-------------------- +% +% 'off' - Deletes any axes previously added. See NOTE below. +% +% OUTPUT: +% HA - Handle of new axes. Not recommended to modify it. See NOTE +% below. +% +% DESCRIPTION: +% This functions adds axes to an existent axes, and links them to work +% with ZOOM/PAN and DATETICK (or TLABEL) if required. +% +% The big difference with the PLOTYY function (besides of working with +% the x-axis as well) is that the added axis is not forced to be +% linearly related with the old one. See the EXAMPLE below. +% +% The idea behind this function, is the creation of temporal invisible +% axes to get the Ticks and TickLabels acoordingly to the specifyed +% relationship. Then other axes are drawn above the current one with +% the same limits but with the Ticks mapped by inverting the +% relationships using the FZERO formulae or the given inverse function. +% See NOTE below for some warnings. +% +% NOTE: +% * Optional inputs use its DEFAULT value when not given or []. +% * Optional outputs may or not be called. +% * Besides from the paired optional inputs in Table above, the +% following normal axes properties may be used: +% 'XColor' 'XTick' 'XTickLabel' +% for axis customization and +% 'CLim' 'FontAngle' 'FontName' 'FontSize' +% 'FontUnits' 'FontWeight' 'Layer' 'LineWidth' +% 'TickDir' 'TickLength' +% for axes customization. +% * Of course, 'Y' axis properties as 'YFunction', 'YColor', etc., may +% be used as well, but not for 'Z'. +% * 'xFun', 'yFun', 'xInv' and 'yInv' must return an array of the same +% size as the input and must be monotonically increasing or +% decreasing. +% * When the relationships 'xFun' and 'yFun' are not linear and given +% by an interpolation (INTERP1 for example) the user must use them +% carefully taking into account extrapolations and the approximations +% results of the invertion formulae provided by FZERO. +% * If the new axis are dates, use the 'XDate'or 'YDate' options like +% in the following EXAMPLE. +% * By now, the function only can be used to add another axes once. +% That is, multiple axes cannot be added, and to include both x- and +% y-axis give both 'xFun' and 'yFun' functions. 
+% +% EXAMPLE: +% % DATA +% t = sort(unique(round(rand(50,1)*100)/100),'descend'); +% [X,Y,Z] = peaks(length(t)); +% xFun = @(x)interp1(X(1,:)',exp(t),x,'linear','extrap'); % Numerical +% yFun = @(x)x.*abs(x)+datenum(date); % Cuadratic +% xInv = @(x)interp1(exp(t),X(1,:)',x,'linear','extrap'); % Numerical +% % PLOT +% figure +% imagesc(X(1,[1 end]),Y([1 end],1),Z), set(gca,'Layer','top') +% addaxes(... % <- THE CLUE +% 'XFun' ,xFun,... +% 'YFun' ,yFun,... +% 'XInv' ,xInv,... +% 'XLeg' ,func2str(xFun),... +% 'YDat' ,'tlabel',... % date ticks! +% 'XColor' ,'b',... +% 'YColor' ,'r'); +% zoom on +% +% SEE ALSO: +% AXES, PLOT, PLOTYY, FUNCTION_HANDLE, FZERO, DATETICK +% and +% TLABEL by Carlos Vargas +% at http://www.mathworks.com/matlabcentral/fileexchange +% +% +% --- +% MFILE: addaxes.m +% VERSION: 1.1 (Sep dd, 2009) (download) +% MATLAB: 7.7.0.471 (R2008b) +% AUTHOR: Carlos Adrian Vargas Aguilera (MEXICO) +% CONTACT: nubeobscura@hotmail.com + +% REVISIONS: +% 1.0 Released. (Jul 29, 2009) +% 1.1 Fixed bug with function inversion, thanks to Allen Hall. Added +% 'Inverse' options and 'Position' link. Fixed small bug related +% with 'off' option. (Sep dd, 2009) + +% DISCLAIMER: +% addaxes.m is provided "as is" without warranty of any kind, under the +% revised BSD license. + +% Copyright (c) 2009 Carlos Adrian Vargas Aguilera + +% INPUTS CHECK-IN +% ------------------------------------------------------------------------- + +% Parameters. +myAppName = 'addAxes'; % Application data name. +zoomAppName = 'zoom_zoomOrigAxesLimits'; + +% Checks number of inputs and outputs. +if nargin<1 + error('CVARGAS:addaxes:notEnoughInputs',... + 'At least 1 input is required.') +elseif nargout>1 + error('CVARGAS:addaxes:tooManyOutputs',... + 'At most 1 output is allowed.') +end + + +% ------------------------------------------------------------------------- +% MAIN +% ------------------------------------------------------------------------- + +if ~((nargin==2) && isstruct(varargin{2})) + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % ADDAXES called from command window or a M-file + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + % Default. + AX = gca; + xFun = @(x) interp1(get(AX,'XLim'),get(AX,'XLim'),x,'linear','extrap'); + yFun = @(x) interp1(get(AX,'YLim'),get(AX,'YLim'),x,'linear','extrap'); + xInv = []; + yInv = []; + xLeg = ''; + yLeg = ''; + dFun = 'tlabel'; % Function to use on date axis. + nSeg = 0.25; % Second to wait for double-click. + + % Program data. To be saved as application data named appname. + data.AX = AX; + data.HA = []; + data.HF = []; + data.xNew = 0; + data.yNew = 0; + data.xFun = xFun; + data.yFun = yFun; + data.xInv = xInv; + data.yInv = yInv; + data.xLeg = xLeg; + data.yLeg = yLeg; + data.xDat = false; + data.yDat = false; + data.aOpt = {}; + data.aNam = myAppName; + data.nSeg = nSeg; + + % Parse inputs. + [data,xTic,yTic,xTLa,yTLa,flag] = parseInputs(data,dFun,varargin{:}); + clear varargin + + % Checks if 'off' option. + if flag + data = getappdata(data.AX,myAppName); + if ~isempty(data) + if ishandle(data.AX) % Fixed bug, Sep 2009 + zh = zoom(data.AX); + ph = pan(data.HF); + set(zh,'ActionPostCallback',[]) + set(ph,'ActionPostCallback',[]) + rmappdata(data.AX,myAppName) + if ishandle(data.HA) + linkaxes([data.AX data.HA],'off') + end + end + if ishandle(data.HA) + delete(data.HA) + end + end + if nargout<0, HA = []; end + return + end + + % Gets axis parent. 
+ data.HF = ancestor(data.AX,{'figure','uipanel'}); + + % Changes some aspects on old axes. + view(data.AX,2) + box (data.AX,'off') + grid(data.AX,'off') + + % Gets new axis locations in front of old one. + pxLoc = get(data.AX,'XAxisLocation'); + pyLoc = get(data.AX,'YAxisLocation'); + xLoc = 'top'; if strcmp(pxLoc,xLoc), xLoc = 'bottom'; end + yLoc = 'right'; if strcmp(pyLoc,yLoc), yLoc = 'left'; end + + % Generates new axes. + tempF = get(0 ,'CurrentFigure'); + tempA = get(tempF,'CurrentAxes'); + data.HA = axes(... + 'Parent' ,data.HF,... + 'Color' ,'none',... + 'Box' ,'off',... + 'Units' ,get(data.AX,'Units'),... + 'Position' ,get(data.AX,'Position'),... + 'View' ,get(data.AX,'View'),... + 'FontUnits' ,get(data.AX,'FontUnits'),... + 'FontSize' ,get(data.AX,'FontSize'),... + 'FontName' ,get(data.AX,'FontNam'),... + 'FontWeight' ,get(data.AX,'FontWeight'),... + 'FontAngle' ,get(data.AX,'FontAngle'),... + 'Layer' ,get(data.AX,'Layer'),... + 'LineWidth' ,get(data.AX,'LineWidth'),... + 'Projection' ,get(data.AX,'Projection'),... + 'TickLength' ,get(data.AX,'TickLength'),... + 'TickDir' ,get(data.AX,'TickDir'),... + 'XColor' ,get(data.AX,'XColor'),... + 'YColor' ,get(data.AX,'YColor'),... + 'XDir' ,get(data.AX,'XDir'),... + 'YDir' ,get(data.AX,'YDir'),... + 'XAxisLocation' ,xLoc,... + 'YAxisLocation' ,yLoc,... + 'XTick' ,[],... + 'YTick' ,[],... + 'Tag' ,'addaxes',... + data.aOpt{:}); + set(0 ,'CurrentFigure',tempF) + set(tempF,'CurrentAxes' ,tempA) + + % Links properties. + theLinks = linkprop([data.AX data.HA],{'Units','Position','View'}); + setappdata(data.AX,[data.aNam 'Links'],theLinks); + setappdata(data.HA,[data.aNam 'Links'],theLinks); + + % Fixes title. + if strcmp(xLoc,'top') + title(data.HA,get(get(data.AX,'Title'),'String')) + title(data.AX,'') + set(data.HA,'Position',get(data.AX,'Position')) % In case it moves + end + + % Updates Ticks's and TickLabel's. + updateTickAndTickLabel(data,xTic,yTic,xTLa,yTLa) + + % Sets ZOOM and PAN functionalities. + zoom(data.AX,'reset') + zh = zoom(data.AX); + ph = pan(data.HF); + set(zh,'ActionPostCallback',@addaxes) + set(ph,'ActionPostCallback',@addaxes) + + % Links axes. + linkaxes([data.AX data.HA]) + + % Saves data. + setappdata(data.HA,data.aNam,data) + setappdata(data.AX,data.aNam,data) + +else + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % ADDAXES called after ZOOM or PAN + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + % Loads data. + data = getappdata(varargin{2}.Axes,myAppName); + if isempty(data) + if nargout==1, HA = data.HA; end + return + end + + % Checks axes. + if any(~ishandle([data.AX data.HA])) + if nargout==1, HA = data.HA; end + return + end + + % Finds out if double-click. + pause(data.nSeg) + drawnow + doubleclick = strcmp(get(data.HF,'SelectionType'),'open'); + if doubleclick + axis(data.AX,getappdata(data.AX,zoomAppName)) + end + + % Initializes. + xTic = []; xTLa = ''; + yTic = []; yTLa = ''; + + % Updates Ticks's and TickLabel's. + updateTickAndTickLabel(data,xTic,yTic,xTLa,yTLa); + +end + + +% OUTPUTS CHECK-OUT +% ------------------------------------------------------------------------- + +if nargout==1, HA = data.HA; end + + +% ========================================================================= +% SUBFUNCTIONS +% ------------------------------------------------------------------------- + +function updateTickAndTickLabel(data,xTic,yTic,xTLa,yTLa) +% Updates Ticks's and TickLabel's. + +% Gets axis limits. 
+pxLim = get(data.AX,'XLim'); +pyLim = get(data.AX,'YLim'); +set(data.HA,... + 'XLim' ,pxLim,... + 'YLim' ,pyLim) +xLim = sort(data.xFun(pxLim)); +yLim = sort(data.yFun(pyLim)); +if any(~isfinite(xLim)) || ~(diff(xLim)>eps(xLim(1))) + warning('CVARGAS:addaxes:incorrectXfunctionLim',... + ['"' func2str(data.xFun) '" is not a monotonical function ' ... + 'or generates NaNs within current X-axis range.']) + return +end +if any(~isfinite(yLim)) || ~(diff(yLim)>eps(yLim(1))) + warning('CVARGAS:addaxes:incorrectYfunctionLim',... + ['"' func2str(data.yFun) '" is not a monotonical function ' ... + 'or generates NaNs within current Y-axis range.']) + return +end + +% Generates a temporal axes with new limits. +tempF = get(0 ,'CurrentFigure'); +tempA = get(tempF,'CurrentAxes'); +fTemp = figure(... + 'Visible' ,'off',... + 'Units' ,get(data.HF,'Units'),... + 'Position' ,get(data.HF,'Position')); +aTemp = axes(... + 'Parent' ,fTemp,... + 'Units' ,get(data.AX,'Units'),... + 'Position' ,get(data.AX,'Position'),... + 'View' ,get(data.AX,'View'),... + 'XLim' ,xLim,... + 'YLim' ,yLim,... + data.aOpt{:}); +set(0 ,'CurrentFigure',tempF); +set(tempF,'CurrentAxes' ,tempA); + +% Sets date TickLabel's. +if data.xDat + if strcmpi(data.xDat,'tlabel') + try + tlabel(aTemp,'x','keeplimits') + if isempty(data.xLeg) + data.xLeg = get(get(aTemp,'XLabel'),'String'); + end + catch + % A possible error is to zoom in beyond seconds! + warning('CVARGAS:addaxes:zoomBeyondSecons',... + 'TLABEL does not work beyond seconds.') + datetick(aTemp,'x','keeplimits') + end + else + datetick(aTemp,'x','keeplimits') + end +end +if data.yDat + if strcmpi(data.yDat,'tlabel') + try + tlabel(aTemp,'y','keeplimits') + if isempty(data.yLeg) + data.yLeg = get(get(aTemp,'YLabel'),'String'); + end + catch + % A possible error is to zoom in beyond seconds! + warning('CVARGAS:addaxes:zoomBeyondSecons',... + 'TLABEL does not work beyond seconds.') + datetick(aTemp,'y','keeplimits') + end + else + datetick(aTemp,'y','keeplimits') + end +end + +% Changes Tick's and TickLabel's. +if (data.xNew~=0) && isempty(xTic) + % Fixed bug, Sep 2009 + [xTic,xTLa] = changeTickAndTicklabel(... + get(aTemp,'XTick'),get(aTemp,'XTickLabel'),data.xFun,data.xInv,pxLim,... + data.xDat); + if (length(xTic)<2) + warning('CVARGAS:addaxes:zoomXOut',... + 'Too depth ZOOM on x-axis!') + else + dTic = diff(xTic); + if any(dTic<=eps(min(dTic))) + warning('CVARGAS:addaxes:incorrectXfunctionTick',... + ['"' func2str(data.xFun) '" is not a monotonical function ' ... + 'within current X-axis range.']) + return + end + end +end +if (data.yNew~=0) && isempty(yTic) + % Fixed bug, Sep 2009 + [yTic,yTLa] = changeTickAndTicklabel(... + get(aTemp,'YTick'),get(aTemp,'YTickLabel'),data.yFun,data.yInv,pyLim,... + data.yDat); + if (length(yTic)<2) + warning('CVARGAS:addaxes:zoomYOut',... + 'Too depth ZOOM on y-axis!') + else + dTic = diff(yTic); + if any(dTic<=eps(min(dTic))) + warning('CVARGAS:addaxes:incorrectYfunctionTick',... + ['"' func2str(data.yFun) '" is not a monotonical function ' ... + 'within current Y-axis range.']) + return + end + end +end + +% Deletes temporal axes. +delete(aTemp), delete(fTemp) + +% Updates axes +set(data.HA,... + 'XTick' ,xTic,... + 'YTick' ,yTic,... + 'XTickLabel' ,xTLa,... + 'YTickLabel' ,yTLa); + +% Sets legends. 
+if ~isempty(data.xLeg), set(get(data.HA,'XLabel'),'String',data.xLeg), end +if ~isempty(data.yLeg), set(get(data.HA,'YLabel'),'String',data.yLeg), end + +function [Tic,TLa] = changeTickAndTicklabel(Tic,TLa,FUN,INV,LIM,DAT) +% Changes Tick's and TickLabel's. +if ~DAT + ind = 1; if (str2double(TLa(ind,:))*Tic(ind))==0, ind = 2; end + if str2double(TLa(ind,:))~=Tic(ind) + expstr = int2str(log10(Tic(ind)/str2double(TLa(ind,:)))); + if ~strcmp(expstr,'0') && ~strcmp(expstr,'-0') + ind = double(isspace(TLa)); + ini = arrayfun(@(x)repmat(' ',1,x),sum(cumprod(ind,2),2),... + 'Uniformoutput',false); + ind = fliplr(ind); + fin = arrayfun(@(x)repmat(' ',1,x),sum(cumprod(ind,2),2),... + 'Uniformoutput',false); + TLa = strcat(ini,strtrim(cellstr(TLa)),['e' expstr],fin); + if iscellstr(TLa), TLa = char(TLa); end + end + end +end + +for k = 1:length(Tic) + % Fixed Bug (thanks to Allen Hall), Sep 2009 + if isempty(INV) + temp = fzero(@(x) FUN(x)-Tic(k),LIM(1)); + if ~((temp>=LIM(1) && (temp<=LIM(2)))) + temp = fzero(@(x) FUN(x)-Tic(k),LIM(2)) + if ~((temp>=LIM(1) && (temp<=LIM(2)))) + temp = fzero(@(x) FUN(x)-Tic(k),mean(LIM)) + datenum(temp) + if ~((temp>=LIM(1) && (temp<=LIM(2)))) + error('CVARGAS:addaxes:unableToGetInverse',... + ['Unable to get ''x'' from ' func2str(FUN) ' = ' num2str(Tic(k)) ... + '. Better try given the inverse formulae.']) + end + end + end + else + try + temp = INV(Tic(k)); + catch + error('CVARGAS:addaxes:errorWithInverse',... + ['Unable to get ''x'' from ' func2str(INV) ' = ' num2str(Tic(k)) ... + '. Error while evaluating.']) + end + if ~((temp>=LIM(1) && (temp<=LIM(2)))) + error('CVARGAS:addaxes:incorrectInverse',... + ['Unable to get ''x'' from ' func2str(INV) ' = ' num2str(Tic(k)) ... + '. Value out of bounds.']) + end + end + Tic(k) = temp; +end +ind = ~isfinite(Tic); +Tic(ind) = []; +TLa(ind,:) = []; +if (length(Tic)>1) && (Tic(1)>Tic(2)) + Tic = sort(Tic); + TLa = flipud(TLa); +end + +function [data,xTic,yTic,xTLa,yTLa,flag] = parseInputs(data,dFun,varargin) +% Parses inputs. + +% Defaults. +xTic = []; xTLa = ''; +yTic = []; yTLa = ''; + +% Gets axes. +if ~isempty(varargin) && (length(varargin{1})==1) && ishandle(varargin{1}) + data.AX = varargin{1}; + if ~strcmp(get(data.AX,'Type'),'axes') + error('CVARGAS:addaxes:incorrectHandleInput',... + 'First input must ve a valid axes handle.') + end + varargin(1) = []; + % Updates functions. + AX = data.AX; + data.xFun = eval(func2str(data.xFun)); + data.yFun = eval(func2str(data.yFun)); +end + +% Checks 'off' option. +flag = false; +if length(varargin)==1 + if isempty(varargin{1}) + % continue + elseif strcmp(varargin{1},'off') + flag = true; + return + end + varargin(1) = []; +end + +% Checks already used function. +if isappdata(data.AX,data.aNam) + error('CVARGAS:addaxes:invalifFunctionUse',... + 'By now ADDAXES only can be used once. Use ''off'' option before.') +end + +% Checks if paired options. +if rem(length(varargin),2)~=0 + error('CVARGAS:addaxes:incorrectPairedOptions',... + 'Option(s) must be paired property/value(s).') +end + +% Saves defaults temporarly in case the given one is empty. +dxFun = data.xFun; +dyFun = data.yFun; + +% Loop. +while ~isempty(varargin) + if isempty(varargin{1}) + error('CVARGAS:addaxes:emptyPropertyName',... 
+ 'Input property name can not be empty.') + elseif ischar(varargin{1}) + n = length(varargin{1}); + if strncmpi(varargin{1},'XFunction' ,max(4,n)), data.xFun = varargin{2}; data.xNew = 1; + elseif strncmpi(varargin{1},'YFunction' ,max(4,n)), data.yFun = varargin{2}; data.yNew = 1; + elseif strncmpi(varargin{1},'XInverse' ,max(4,n)), data.xInv = varargin{2}; + elseif strncmpi(varargin{1},'YInverse' ,max(4,n)), data.yInv = varargin{2}; + elseif strncmpi(varargin{1},'XLegend' ,max(4,n)), data.xLeg = varargin{2}; + elseif strncmpi(varargin{1},'YLegend' ,max(4,n)), data.yLeg = varargin{2}; + elseif strncmpi(varargin{1},'XDate' ,max(4,n)), data.xDat = varargin{2}; + elseif strncmpi(varargin{1},'YDate' ,max(4,n)), data.yDat = varargin{2}; + elseif strncmpi(varargin{1},'XTick' ,max(5,n)), xTic = varargin{2}; + elseif strncmpi(varargin{1},'YTick' ,max(5,n)), yTic = varargin{2}; + elseif strncmpi(varargin{1},'XTickLabel',max(6,n)), xTLa = varargin{2}; + elseif strncmpi(varargin{1},'YTickLabel',max(6,n)), yTLa = varargin{2}; + elseif strncmpi(varargin{1},'XColor' ,max(2,n)) || ... + strncmpi(varargin{1},'YColor' ,max(2,n)) || ... + strncmpi(varargin{1},'CLim' ,max(2,n)) || ... + strncmpi(varargin{1},'FontAngle' ,max(5,n)) || ... + strncmpi(varargin{1},'FontName' ,max(5,n)) || ... + strncmpi(varargin{1},'FontSize' ,max(5,n)) || ... + strncmpi(varargin{1},'FontUnits' ,max(5,n)) || ... + strncmpi(varargin{1},'FontWeight' ,max(5,n)) || ... + strncmpi(varargin{1},'Layer' ,max(2,n)) || ... + strncmpi(varargin{1},'LineWidth' ,max(5,n)) || ... + strncmpi(varargin{1},'TickDir' ,max(3,n)) || ... + strncmpi(varargin{1},'TickLength' ,max(5,n)) + data.aOpt = {data.aOpt{:},varargin{1},varargin{2}}; + else + error('CVARGAS:addaxes:invalidProperty',... + ['Invalid property ''' varargin{1} '''.']) + end + else + error('CVARGAS:addaxes:invalidPropertyType',... + 'Property name must be a valid one.') + end + varargin(1:2) = []; +end + +% Checks empty functions. +if isempty(data.xFun), data.xFun = dxFun; end +if isempty(data.yFun), data.yFun = dyFun; end + +% Checks string functions. +if ischar(data.xFun), data.xFun = str2func(data.xFun); end +if ischar(data.yFun), data.yFun = str2func(data.yFun); end + +% Checks ticks and labels. +if ((size(xTLa,1)~=0) && (length(xTic)~=size(xTLa,1))) || ... + ((size(yTLa,1)~=0) && (length(yTic)~=size(yTLa,1))) + error('CVARGAS:addaxes:invalidLabels',... + ['When ''TickLabel''s are specified, '... + 'the respective ''Tick''s should be given too.']) +end + +% Checks date axis. +if data.xDat + if ~ischar(data.xDat) + data.xDat = dFun; + else + data.xDat = lower(data.xDat); + if ~strcmp(data.xDat,'tlabel') && ~strcmp(data.xDat,'datetick') + error('CVARGAS:addaxes:invalidDateFunction',... + ['Unrecognized ''' data.xDat ''' function. ' ... + 'Must be one of ''tlabel'' or ''datetick''.']) + end + end +end +if data.yDat + if ~ischar(data.yDat) + data.yDat = dFun; + else + data.yDat = lower(data.yDat); + if ~strcmp(data.yDat,'tlabel') && ~strcmp(data.yDat,'datetick') + error('CVARGAS:addaxes:invalidDateFunction',... + ['Unrecognized ''' data.yDat ''' function. ' ... 
+ 'Must be one of ''tlabel'' or ''datetick''.']) + end + end +end + + +% [EOF] addaxes.m \ No newline at end of file diff --git a/agg_glbwrf.m b/agg_glbwrf.m new file mode 100644 index 0000000..805d9ae --- /dev/null +++ b/agg_glbwrf.m @@ -0,0 +1,57 @@ +function otpt = agg_glbwrf(inpt, varname, period, avswitch) + + +time = nj_varget(inpt, 'time'); + +for i = 1:length(time) + tmp = int2str(time(i)); + yr(i,:) = str2num(tmp(1:4)); + mn(i,:) = str2num(tmp(5:6)); + dy(i,:) = str2num(tmp(7:8)); + hr(i,:) = str2num(tmp(9:10)); + dta{i} = flipud(nj_varget(inpt, varname, [i 1 1], [1 inf inf])); +end + + +if strcmp(period, 'daily') + days = unique([yr mn dy], 'rows'); + for i = 1:size(days,1) + indx = find(dy == days(i,3)); + otpt{i} = 0; + for j = 1:length(indx) + otpt{i} = otpt{i} + dta{indx(j)}; + end + if avswitch == 1 + otpt{i} = otpt{i}/length(indx); + end + end + +elseif strcmp(period, 'monthly') + mnths = unique([yr mn], 'rows'); + for i = 1:size(mnths,1) + indx = find(mn == mnths(i,2)); + otpt{i} = 0; + for j = 1:length(indx) + otpt{i} = otpt{i} + dta{indx(j)}; + end + if avswitch == 1 + otpt{i} = otpt{i}/length(indx); + end + end + +end + + + + + + + + + + + + + + + diff --git a/area_wghts.m b/area_wghts.m new file mode 100644 index 0000000..da44a49 --- /dev/null +++ b/area_wghts.m @@ -0,0 +1,83 @@ +function A = area_wghts(theta, dlambda, otpt, method, R) + +% area.m computes the area of a particular pixel (or a vector with pixel +% center coordinates) according to its co-latitude and angular side length +%-------------------------------------------------------------------------- +% Input: n [1 x 1] angular side length of a pixel [deg] +% (default: n = 0.5°) +% theta [n x 1] co-latitude of the pixel center [deg] +% +% Output: A [n x 1] area of the pixels on the surface +% of the Earth [m^2] +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: January 2008 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +if nargin < 5, R = 6378137; end +if nargin < 4, method = 'regular'; end +if nargin < 3, otpt = 'vec'; end +if nargin < 2, dlambda = abs(theta(2) - theta(1)); end + +rho = pi/180; + + + +dlat = abs(theta(2) - theta(1)); + +lat1 = (theta + dlat/2)*rho; +lat2 = (theta - dlat/2)*rho; + +dlat = dlat*rho; +dlon = dlambda*rho; + + +if strcmp(method, 'haversine') + + for i = 1:length(theta) + sn(i,1) = haversine(lat1(i), lat1(i), dlambda*rho); + ss(i,1) = haversine(lat2(i), lat2(i), dlambda*rho); + sew(i,1) = haversine(lat1(i), lat2(i), 0); + end + + A = 1/2*(sn + ss).*sew; + + + +elseif strcmp(method, 'regular') + + A = abs(dlon*R^2.*(sin(lat1) - sin(lat2)))'; + +elseif strcmp(method, 'cos') + + A = cos(theta*pi/180)'; + +elseif strcmp(method, 'vincenty') + + for i = 1:length(theta) + sn(i,1) = vincenty(lat1(i), lat1(i), dlambda*rho); + ss(i,1) = vincenty(lat2(i), lat2(i), dlambda*rho); + sew(i,1) = vincenty(lat1(i), lat2(i), 0); + end + + % Interpolation for equatorial values + snnan = find(isnan(sn)); + ssnan = find(isnan(ss)); + + sn(snnan) = (sn(snnan-1) + sn(snnan+1))/2; + ss(ssnan) = (ss(ssnan-1) + ss(ssnan+1))/2; + + A = 1/2*(sn + ss).*sew; +end + + +if strcmp(otpt, 'mat') + A = A*ones(1,360/(dlambda)); +end + + + + + \ No newline at end of file diff --git a/blank.m b/blank.m new file mode 100644 index 0000000..65261d3 --- /dev/null +++ b/blank.m @@ -0,0 +1,35 @@ +function [ordb, abcb] = blank(ord, abc, M) +% 
assuming ord is depth, positive downward. +% abc is like time or latitude. +[nr, nc] = size(M); + +[abc, ii] = sort(abc); +M = M(:,ii); + +% dord and dabc are used to push the polygon out a bit, to +% avoid unwanted blanking due to roundoff. +dord = min(abs(diff(ord(:))))*0.01; +% assuming ord is depth, positive downward. +% abc is increasing; it has been sorted. +dabc = min(abs(diff(abc(:))))*0.01; + +ORD = ord(:,ones(nc,1)); +badmask = isnan(M); + +ORD(badmask) = NaN; +ordmax = max(ORD) + dord; +ordmin = min(ORD) - dord; + +ii = isnan(ordmax); +ordmax(ii) = max(ordmin); +ordmin(ii) = ordmax(ii)-dord; + +ordmax(2:(end-1)) = max([ ordmax(1:(end-2)); ordmax(2:(end-1)); ordmax(3:end)]); +ordmin(2:(end-1)) = min([ ordmin(1:(end-2)); ordmin(2:(end-1)); ordmin(3:end)]); + +abc(1) = abc(1) - dabc; +abc(end) = abc(end) + dabc; + +ordb = [ordmax ordmin(end:-1:1) ordmax(1)]; +abcb = [abc abc(end:-1:1) abc(1)]; + diff --git a/boxplot_data.m b/boxplot_data.m new file mode 100644 index 0000000..42c9e65 --- /dev/null +++ b/boxplot_data.m @@ -0,0 +1,68 @@ +function [box_data, outliers] = boxplot_data(bxplt_id); + +% As MATLAB does (originally) not provide a function to compute the +% elements of a boxplot directly, this function extracts the boxplot data +% (median, upper/lower whisker, the 25% and 75% quantiles and the outliers) +% and stores them in two output matrices. The elements of these matrices +% are as follows: +% +% box_data = group_id | median | lower_whisker | 25% | 75% |upper_whisker +% outliers = group_id | outlier_value +% +% This format can be directly saved as an ascii file and used e.g. for +% creating a box-and-whisker-plot in GMT. +% +% +% Outlier testing (taken from the MATLAB-help) +% +% The default is a w of 1.5. Points are drawn as outliers if they are +% larger than q3 + w(q3 – q1) or smaller than q1 – w(q3 – q1), where q1 and +% q3 are the 25th and 75th percentiles, respectively. The default of 1.5 +% corresponds to approximately +/–2.7σ and 99.3 coverage if the data are +% normally distributed. +%-------------------------------------------------------------------------- +% Input: bxplt_id Handle to the boxplot from which the data +% shall be extracted +% e.g. 
h = boxplot(data) -> bxplot_id = h +% +% Output: box_data median, upper/lower whisker, the 25% and +% 75% quantiles of the plotted data +% outliers Contains the outliers +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: August 2012 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + + +% Define handles for the different boxplot elements +h1 = findobj(bxplt_id, 'tag', 'Median'); +h2 = findobj(bxplt_id, 'tag', 'Upper Whisker'); +h3 = findobj(bxplt_id, 'tag', 'Lower Whisker'); +h4 = findobj(bxplt_id, 'tag', 'Outliers'); + +% Extract the data from the handles +y1 = get(h1, 'YData'); % Median +y2 = get(h2, 'YData'); % Upper Whisker, 75% quantile +y3 = get(h3, 'YData'); % Lower Whisker, 25% quantile +y4 = get(h4, 'YData'); % Outliers + +outliers = []; + +for i = 1:length(y1) + box_data(i, 1) = i; + box_data(i, 2) = y1{i}(1, 1); + box_data(i, 3) = y3{i}(1, 1); + box_data(i, 4) = y3{i}(1, 2); + box_data(i, 5) = y2{i}(1, 1); + box_data(i, 6) = y2{i}(1, 2); + + tmp = [ones(length(y4{i}), 1)*i y4{i}(:)]; + outliers = [outliers; tmp]; +end + + + + diff --git a/catchmat2cell.m b/catchmat2cell.m new file mode 100644 index 0000000..1822724 --- /dev/null +++ b/catchmat2cell.m @@ -0,0 +1,62 @@ +function F = catchmat2cell(flds, cindx, rws, cls, mval, arr); +% The function re-arranges the elements of the matrix flds into a +% cell array, where each cell contains one "map". The locations on +% these maps must be referenced by the vector cindx. The output +% maps have the dimension [rws x cls], where undefined values +% are set to mval. +% Normally, the flds-matrix is arranged longitude-wise. In the +% case of latitude-ordering, arr must be set to 2. +%-------------------------------------------------------------------------- +% Input: flds [m x n] Matrix which contains maps for m number +% of timesteps and n pixels +% cindx [n x 1] Referencing vector which contains the +% ondices of the elements of flds in a large +% "map". +% rws, cls [1 x 1] Number of rows and columns of the output +% maps. +% mval [1 x 1] Value of undefined elements +% Default: -9999 +% arr 1 or 2 arr = 1: Longitude ordering +% arr = 2: Latitude ordering +% Detault: arr = 1; +% Output: F {m x 1} Cell array which has a number of elements +% equal to the number of time-steps (i.e. +% the rows) of flds. 
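+% Example (hypothetical usage sketch; the variable names below are
+% illustrative and not part of this function, but the call follows the
+% interface documented above):
+%
+%   vals = rand(24, numel(cindx));                    % 24 time steps
+%   maps = catchmat2cell(vals, cindx, 360, 720, -9999, 1);
+%   % maps is a {24 x 1} cell array of [360 x 720] maps; pixels not
+%   % referenced by cindx contain the missing value -9999
+%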
+%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +if nargin < 5, mval = -9999; end +if nargin < 6, arr = 1; end + +[n, p] = size(flds); + +if n == 1 + tmp = ones(rws*cls, 1)*mval; + tmp(cindx) = flds; + if arr == 2 + F = reshape(tmp, cls, rws); + F = F'; + else + F = reshape(tmp, rws, cls); + end + +else + F = cell(n, 1); + for i = 1:n + F{i} = zeros(rws, cls); + + tmp = ones(rws*cls, 1)*mval; + tmp(cindx) = flds(i,:); + if arr == 2 + F{i,1} = reshape(tmp, cls, rws); + F{i,1} = F{i,1}'; + else + F{i,1} = reshape(tmp, rws, cls); + end + end +end + diff --git a/cca.m b/cca.m new file mode 100644 index 0000000..c971e0e --- /dev/null +++ b/cca.m @@ -0,0 +1,34 @@ +function [wx, wy, lam] = cca(X, Y); + +% Canonical correlation analysis for the two [m x n] data matrices X and Y +% which containn n samples of two m-dimensional random variables. + + +[rx, cx] = size(X); +[ry, cy] = size(Y); + +if rx ~= ry | cx ~= cy + error('Data matrices must have the same dimension'); +end + + +% 1. Removing the mean +Ax = X - ones(rx, 1)*mean(X,1); +Ay = Y - ones(ry, 1)*mean(Y,1); + +% Performing an svd for the data matrices +[Ux, Dx, Vx] = svd(Ax, 'econ'); +[Uy, Dy, Vy] = svd(Ay, 'econ'); + +K = Ux'*Uy; + +[U, D, V] = svd(K, 'econ'); + + +wx = Vx*inv(Dx)*U; +wy = Vy*inv(Dy)*V; + +lam = diag(D).^2; + + + diff --git a/cca_ana.m b/cca_ana.m new file mode 100644 index 0000000..295d96c --- /dev/null +++ b/cca_ana.m @@ -0,0 +1,432 @@ +function [eofs, pcs, lams, recon] = eof_ana(inpt1, inpt2, varargin); +% The function computes the Empirical orthagonal functions, principal +% components and the eigenvalues for a set of input fields. +%-------------------------------------------------------------------------- +% Input (mandatory): +% - inpt1 {m x 1} Cell arrays which contains the input fields. +% inpt2 {n x 1} +% +% Input (optional): +% - theta [i x 1] Vector containing the latitudes of the elements of +% the input fields. Theta is only needed if areawghts +% is set to true +% Default: (89.75:-0.5:-89.75)' +% +% - miss [1 x 1] Scalar (or NaN) representing missing values in the +% input fields. The parameter is only needed if +% cntsmiss is set to true. +% Default: -9999 +% +% - mask [i x j] Optional binary map which can be applied to perform +% a domain selection. The map must have the same +% dimensions as the input fields and its elements must +% be either 0 or 1. +% +% - mxmde [1 x 1] Highest mode of the singular value decomposition. +% Set to 0 if the highest mode should be determined +% from the data. +% Default: 0 +% +% - dorecon (logical) If set to true, the function will compute maps of +% the eigenvectors (the EOFs) and reconstruct the data +% from the PCs and the EOFs. +% Default: true +% +% - addmn (logical) By default, the input data is centralized before the +% computation of the EOFs. To better compare the +% reconstructed data with the input fields, the mean +% can be added back to the maps. +% Default: true +% +% - remmn (logical) Prior to the computation of the EOFs, the temporal +% mean should be removed from the input data. In this +% case, the matrix product R = F'*F represents the +% covariance matrix of the input data. +% Default: true +% +% - cntsmiss (logical) In some cases, the input dataset contains missing +% values. 
The function removes these values from the +% analysis. By default, it is assumed that missing +% elements contain -9999. In any other case, the +% parameter miss must be set to the appropriate value. +% Default: false +% +% - areawght (logical) In most cases, latitude-dependent area weights (the +% square root of the cosine of the latitude) are +% applied to account for meridional convergence, i.e. +% the area of the grid cells decreases towars high +% latitudes. If set to true, the parameter theta must +% agree with the latitudes of the input fields. +% Default: true +% +% - decompdim (string) The dimension in which the decomposition shall be +% performed. By default, the function does a temporal +% decomposition. +% Default: temp +% +% - nrmflgeof (logical)If normflg is set to true, the EOFs are normalized +% so that the length of each vector is 1. +% Default: true +% +% - nrmflgdta (logical)If nrmflgdta is set to true, the data is normalized +% before the EOF decomosition. In this case, the EOFs +% do not consider the amplitude of the signal but only +% the variability. This helps if the EOFS of different +% quantities (e.g. precipitation and temperature) +% should be compared. +% Default: false +% quantitiers +% +% Output: +% - eofs Matrix which contains the normalized eigenvectors +% (i.e. the EOFs of the covariance matrix) where the +% kth column contains the EOF of the kth mode. +% +% - pcs Principal components of the EOFs which account for +% their temporal variability. The matrix contains +% time-series for each principal component. +% +% - lams Eigenvalues of the covariance matrix R = F'*F. The +% first column contains the absolute values. The +% second column contains the percentage of the total +% explained variance (i.e. the sum of all eigenvalues) +% while the elements of the third column are the +% cumulative squared covariance fraction. +% +% - recon Structure parameter which contains maps of the +% eigenvectors and the reconstructed input data from +% the EOFs and the PCs. 
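+% Example (hypothetical usage sketch; variable names are illustrative.
+% The call assumes a single cell array of input fields, as expected by
+% the input parser below, which parses one required argument 'inpt'):
+%
+%   % flds is a {120 x 1} cell array of [360 x 720] monthly fields
+%   [eofs, pcs, lams, recon] = eof_ana(flds, 'mxmde', 10);
+%   plot(pcs(:,1));            % time series of the leading mode
+%   imagesc(recon.eofs{1});    % spatial pattern of the leading EOF
+%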
+%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: January 201 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- +fprintf('------------------------------------------------------------ \n') +fprintf(' EOF - Analysis \n') +fprintf('------------------------------------------------------------ \n') + + +% ------------------------------------------------------------------------- +% Input checking and setting defalut values +% ------------------------------------------------------------------------- + +pp = inputParser; +pp.addRequired('inpt', @(x) (iscell(x) | isnumeric(x))); + +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('mask', 0, @isnumeric); +pp.addParamValue('mxmde', 0, @isint); + +pp.addParamValue('dorecon', true, @islogical); +pp.addParamValue('addmn', true, @islogical); +pp.addParamValue('remmn', true, @islogical); +pp.addParamValue('cntsmiss', false, @islogical); +pp.addParamValue('areawght', true, @islogical); +pp.addParamValue('decompdim', 'temp', @ischar); +pp.addParamValue('nrmflgeof', true, @islogical); +pp.addParamValue('rotflg', 0, @isint); +pp.addParamValue('maxit', 1000, @isint); +pp.addParamValue('normrot', true, @islogical); + + + +pp.parse(inpt, varargin{:}); + +areawght = pp.Results.areawght; +theta = pp.Results.theta; +miss = pp.Results.miss; +mask = pp.Results.mask; +mxmde = pp.Results.mxmde; +dorecon = pp.Results.dorecon; +addmn = pp.Results.addmn; +remmn = pp.Results.remmn; +cntsmiss = pp.Results.cntsmiss; +decompdim = pp.Results.decompdim; +nrmflgeof = pp.Results.nrmflgeof; +rotflg = pp.Results.rotflg; +maxit = pp.Results.maxit; +normrot = pp.Results.normrot; + +clear pp + + +% Compute the size of the input fields and the number of available samples +% (i.e. the number of time-steps). It is assumed that the number of +% time-steps corresponds to the number of available fields in the input +% dataset. +[rws, cls] = size(inpt{1}); +nts = length(inpt); + + +% ------------------------------------------------------------------------- +% Computation of area weights +% ------------------------------------------------------------------------- +% Compute weight factors. In some publications the weight factors are +% simply the cosines of the latitute. But it is more convenient to use the +% square root of the cosines as this will ensure the correct weighting of +% the covariance matrix, i.e C_w = cos(lat)*F'*F; +if areawght == true + fprintf('EOF-ana -> Computing the weight factors! \n') + if size(theta, 2) > size(theta, 1) + theta = theta'; + end + weights = sqrt(cos(theta*pi/180))*ones(1, cls); +else + weights = ones(rws, cls); +end + + +% ------------------------------------------------------------------------- +% Domain selection and array re-ordering +% ------------------------------------------------------------------------- +% Domain selection with a binary mask. Note that the computation of EOFs +% has a strong domain dependency, i.e the results might differ +% significantly based on the selected domain. +% If no mask is applied, the function generates a mask to account for +% missing elements in the input data, i.e. grid points where no values +% are available +if size(mask) == [1 1] + fprintf('EOF-ana -> No mask is applied! 
\n') + if cntsmiss == true + % Create a mask with the same dimensions as the input dataset. In + % this mask, all elements which are missing in the input data are + % set to zero. The loop must be evaluated prior to transforming the + % input cell array to a large data matrix to account for changing + % positions of the missing values. + mask = ones(rws, cls); + for i = 1:nts + mask(inpt{i} == miss) = 0; + end + % Mask_vec is the column vector representing the mask. This is used + % to determine the index (i.e. the location) of missing values in + % the input dataset. + mask_vec = mask(:); + c_indx = find(mask_vec == 1); + + % The loop weights the input data according to the previously + % computed weight factors and stores only the elements at the + % locations where the mask matrix contains ones. + fprintf('EOF-ana -> Rearange the cell-array in a matrix! \n') + for i = 1:nts + tmp = inpt{i}.*weights; + F(i,:) = tmp(mask == 1)'; + clear tmp + end + + else + % No mask is provided and the input data does not contain + % missing values + fprintf('EOF-ana -> Rearange the cell-array in a matrix! \n') + for i = 1:nts + tmp = inpt{i}.*weights; + F(i,:) = tmp(:)'; + clear tmp + end + % In this case, c_indx is simply a column vector whose elements are + % all 1. + c_indx = (1:rws*cls)'; + end + + +% Apply a binary mask to perform the domain selection +elseif size(mask) == [rws, cls] + + % If the input data contains missing values, these elements are set to + % zero in the mask + if cntsmiss == true + for i = 1:nts + mask(inpt{i} == miss) = 0; + end + end + % Mask_vec is the column vector representing the mask. This is used + % to determine the index (i.e. the location) of missing values in + % the input dataset. + mask_vec = mask(:); + c_indx = find(mask_vec == 1); + + for i = 1:nts + tmp = inpt{i}.*weights; + F(i,:) = tmp(mask == 1)'; + clear tmp + end +end + +clear weights + +if strcmp(decompdim, 'spat') + F = F'; +end + +% ------------------------------------------------------------------------- +% Centralize the input dataset +% ------------------------------------------------------------------------- +% Removing the sample mean from the data. This ensures that R = F'*F can be +% interpreted as the covariance matrix (which is not done explicitly in +% this function as the SVD-approach is used to compute the EOFs). However, +% the EOFs can be also computed without removing the mean from the data +% (set remmn to false) but the results are more difficult to interpret. +if remmn == true + fprintf('EOF-ana -> Removing the mean... \n') + mn_F = mean(F,1); + F = F - ones(nts, 1)*mn_F; +end +Fnew = F; +save Fnew.mat Fnew + +% ------------------------------------------------------------------------- +% Compute EOFs, PCs and eigenvalues through SVD +% ------------------------------------------------------------------------- +% Compute left and right singular vecors and the singular values of the +% input data F througth F = U*P*V' +% The right singular vectors V contain the eigenvectors (EOFs) of the +% covariance matrix R = F'*F while the (diagonal) matrix P contains the +% square roots of the eigenvalues of R. +if mxmde == 0 | mxmde == nts + [U, P, eofs] = svd(F, 'econ'); +elseif mxmde == rws*cls + [U, P, eofs] = svd(F); +else + [U, P, eofs] = svds(F, mxmde); +end + +% Compute the eigenvalues and the fraction of the variance explained +% For the eigenvalues, we have to divide through the sample size (which is +% the number of time-steps). 
This is due to the fact that the squared +% elements of P are the eigenvalues of the covariance matrix R = F'*F, but +% it is more reasonable to divide the covariance throught the number of +% samples, i.e. R = (1/(N-1))*F'*F. +% The function further computes the squared covariance explained (SCF) and +% the cumulative squared covariance fraction (CSCF) of the covariance +% matrix R. +lams(:,1) = (diag(P).^2)/(nts); % Eigenvalues +lams(:,2) = (lams(:,1)/sum(lams(:,1)))*100; % SCF +for i = 1:length(lams) + lams(i,3) = (sum(lams(1:i,1))/sum(lams(:,1)))*100; % CSCF +end + + + +% The EOFs are normalized such that the sum of squares for each EOF pattern +% equals one. To denormalize the returned EOFs multiply by the square root +% of the associated eigenvalue. +if nrmflgeof == true + eofs = eofs./(ones(length(eofs),1)*sum(eofs.^2).^(1/2)); +end + +% Compute the principal components which are row vectors containing +% time-series for each mode. They are simply the projection of F onto the +% EOF of each mode. +pcs = F*eofs; + +% Still under construction..... +% ------------------------------------------------------------------------- +% Optional: Rotation of EOFs +% ------------------------------------------------------------------------- +% Rotation of the PCs +% if rotflg == 1 +% if normrot == true +% pcs_rt = pcs./(ones(nts,1)*sum(pcs.^2).^(1/2)); +% else +% pcs_rt = pcs; +% end +% +% TT = eye( nc ); +% d = 0; +% +% for i = 1 : maxit +% z = x * TT; +% B = x' * ( z.^3 - z * diag(squeeze( ones(1,p) * (z.^2) )) / p ); +% +% [U,S,V] = svd(B); +% +% TT = U * V'; +% +% d2 = d; +% d = sum(diag(S)); +% +% % End if exceeded tolerance. +% if d < d2 * (1 + tol), break; end +% end + + + + + + + + + +% ------------------------------------------------------------------------- +% Reconstruction of the EOFs and the input data +% ------------------------------------------------------------------------- +% For the spatial representation of the EOFs, the function computes maps +% for each single mode and reconstructs the input data from the EOFs. +if dorecon == true + % For each mode, a vector with [rws*cls] elements (NaNs) is created. + % This allows to reshape each EOF mode to the dimensions of the input + % fields (i.e. the maps). + for i = 1:size(eofs,2) + tmp = zeros(rws*cls, 1)*NaN; + tmp(c_indx,1) = eofs(:,i); + recon.eofs{i,1} = reshape(tmp, rws, cls); + clear tmp + end + + % Each row of F_recon represents the reconstruction (i.e. the map) of + % the input data from the PCs and the EOFs at one time-step. Thus, the + % matrix has a total of [nts] rows. + F_recon = pcs*eofs'; + + % To better compare the reconstruced data with the input data, the mean + % must be added back to the maps. This step also ensures that unwanted + % elements in the input dataset, i.e. where the binary map contains + % zeros, are set to NaN in the reconstructed maps. + mn_fld = zeros(rws*cls, 1)*NaN; + if addmn == true + mn_fld(c_indx,1) = mn_F(:); + else + mn_fld(c_indx,1) = 0; + end + + % The reconstruction of the input data is performed by reshaping each + % row of the F_recon matrix to a matrix with [rws, cls] dimensions. 
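+    % (Illustrative note: with the default global 0.5-degree grid this
+    % means rws = 360 and cls = 720, so each recon.F{i} below is a
+    % [360 x 720] map, with the temporal mean added back if addmn is true.)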
+ for i = 1:size(F_recon, 1) + tmp = zeros(rws*cls, 1); + tmp(c_indx, 1) = F_recon(i,:)'; + recon.F{i} = reshape(tmp + mn_fld, rws, cls); + clear tmp + end + +else + recon = 0; +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cdiffcell.m b/cdiffcell.m new file mode 100644 index 0000000..fce554e --- /dev/null +++ b/cdiffcell.m @@ -0,0 +1,103 @@ +function ddt = cdiffcell(inpt, varargin); +% The function computes the first derivative of a given input dataset inpt +% according to the method of central differences. For the first (last) +% field, the derivative is computed according to forward (backward) +% differences. +%-------------------------------------------------------------------------- +% Input: inpt {m x n} Cell array which contains the input +% fields. +% clms [1 x 3] column indices of the input containing +% month, year and the corresponding field + + +% +% Output: otpt [m x k] matrix containing the area-weighted +% means +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: area_wghts.m +%-------------------------------------------------------------------------- + + +% Checking input arguments and setting default values +pp = inputParser; +pp.addRequired('inpt', @(x)iscell(x)); % Input dataset (cell) +pp.addOptional('clms', [3 4 8], @(x) isnumeric(x)); % Columns of m/y/flds +pp.addOptional('time', 0, @(x) isnumeric(x)); % start and end time +pp.addOptional('domweight', 1, @(x) isnumeric(x)); % Apply days of month weighting +pp.parse(inpt, varargin{:}) + +clms = pp.Results.clms; +time = pp.Results.time; +domweight = pp.Results.domweight; + + + +if size(clms) == [1 1] % No time information is available + if domweight == 1 + warning('No time infomration provided!!! 
Skipping dom weighting') + end + indx_c = (1:length(inpt))'; + indx_b = [1; indx_c(1:end-1)]; + indx_f = [indx_c(2:end); indx_c(end)]; + + div = [1; ones(length(indx_c)-2,1)*2; 1]; + +else + + mnth = cell2mat(inpt(:, clms(1))); + yr = cell2mat(inpt(:, clms(2))); + dom = eomday(yr, mnth); + flds = inpt(:,clms(3)); + + if time == 0 + sind = 1; + eind = length(flds); + else + sind = find((mnth) == 1 & yr == time(1)); + eind = find((mnth) == 12 & yr == time(2)); + end + + indx_c = (sind:1:eind)'; + div = zeros(length(indx_c),1); + + +% if sind == 1 +% indx_b = [1; indx_c(1:end-1-1)]; +% div(1) = [1; ones(length(indx_c)-2,1)*2; 1]; +% else +% indx_b = [sind-1; indx_c(1:end-1-1)]; +% end +% +% if eind == numel(flds) +% indx_f = [indx_c(2:end); indx_c(end)]; +% else +% indx_f = [indx_c(2:end); eind+1]; +% end + +end + + + + +fields = inpt(sind:eind, clms); +n = length(fields); + +keyboard +for i = 1:n + ddt{i,1} = fields{i,1}; + ddt{i,2} = fields{i,2}; + if i == 1 + ddt{i,3} = (dom(i+1)*fields{i+1,3} - dom(i)*fields{i,3})/(dom(i+1) + dom(i)); + elseif i == n + ddt{i,3} = (dom(i)*fields{i,3} - dom(i-1)*fields{i-1,3})/(dom(i) + dom(i-1)); + else + ddt{i,3} = (dom(i+1)*fields{i+1,3} - dom(i-1)*fields{i-1,3})/(dom(i+1)/2 + dom(i) + dom(i-1)/2); + end +end + + + + diff --git a/cdiffts.m b/cdiffts.m new file mode 100644 index 0000000..40935ea --- /dev/null +++ b/cdiffts.m @@ -0,0 +1,29 @@ +function ddt = cdiffts(inpt, tsrs, varargin) + +pp = inputParser; +pp.addRequired('inpt', @ismatrix); % Input dataset (cell) +pp.addRequired('tsrs', @isvector); % Id_map (matrix) +pp.addParamValue('dt', 1, @isnumeric); +pp.addParamValue('clms', [1 2 3], @isnumeric); % Columns with m/y/dta + +pp.parse(inpt, tsrs, varargin{:}) + +clms = pp.Results.clms; +dt = pp.Results.dt; + +sind = find(inpt(:,1) == tsrs(1) & inpt(:,2) == tsrs(2)); +eind = find(inpt(:,1) == tsrs(3) & inpt(:,2) == tsrs(4)); + +n_ts = size(inpt,1); + + + +if sind > 1 && eind < n_ts + bw = inpt(sind-1:eind-1,clms(3):end); + fw = inpt(sind+1:eind+1,clms(3):end); + ddt = zeros(size(bw,1), size(bw,2)+2); + + ddt(:,1) = inpt(sind:eind, clms(1)); + ddt(:,2) = inpt(sind:eind, clms(2)); + ddt(:, 3:end) = (fw(:,:) - bw)/dt; +end \ No newline at end of file diff --git a/cell2catchmat.m b/cell2catchmat.m new file mode 100644 index 0000000..ff464da --- /dev/null +++ b/cell2catchmat.m @@ -0,0 +1,100 @@ +function [F, cindx] = cell2catchmat(flds, mask, mval, arr) +% The function rearranges the elements in an cell-array (e.g. an array of +% maps) to a big matrix, which has a number of rows equal to the number of +% time-steps in flds and a number of columns equal to the pixels of a +% single field. The function allows the consideration of a mask to reduce +% the size of a matrix, if e.g. large parts of the input fields contain +% missing values or if further computations are needed for a specific area +% only. +%-------------------------------------------------------------------------- +% Input: flds {m x 1} Cell array (or single matrix) which contains +% the input fields. +% mask [r x c] Binary mask for removing undesired pixels from +% the flds-cells +% mval [1 x 1] If mval is set, the function also searches for +% missing values in flds and removes these +% elements +% Default: -9999 +% arr 1 or 2 arr = 1: Longitude ordering +% arr = 2: Latitude ordering +% Detault: arr = 1; +% Output: F [m x n] Matrix which has a number of rows equal to +% the number of timesteps in flds (i.e. 
m) +% and a number of columns equal to the number +% of ones (and the number of missing values) +% in mask +% cindx [n x 1] Column-vector which contains the positions +% of the valid elements in flds +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +if nargin < 4, arr = 1; end +if nargin < 3, mval = -9999; end + +if isnumeric(flds) + + if nargin < 2, mask = ones(flds); end + if isnan(mval) + mask(isnan(flds)) = 0; + else + mask(flds == mval) = 0; + end + + % Store the 2D-mask in a long column-vector + if arr == 2, mask = mask'; end + mask_vec = mask(:); + + % Find the positions of the elements which are ~= 0 + cindx = find(mask_vec == 1); + + if arr == 2, flds == flds'; end + tmp = flds(:); + F(1, :) = tmp(cindx); + + +elseif iscell(flds) + + nts = length(flds); + + if nargin < 2, mask = ones(flds{1}); end + if isnan(mval) + mask(isnan(flds{1})) = 0; + else + mask(flds{1} == mval) = 0; + end + + + % Store the 2D-mask in a long column-vector + if arr == 2, mask = mask'; end + mask_vec = mask(:); + + % Find the positions of the elements which are ~= 0 + cindx = find(mask_vec == 1); + + % Create the matrix F... + F = zeros(nts, sum(sum(mask))); + + for i = 1:length(flds) + if arr == 2, flds{i} = flds{i}'; end + tmp = flds{i}(:); + F(i,:) = tmp(cindx); + end +end + + + + + + + + + + + + + + diff --git a/cell2netcdf.m b/cell2netcdf.m new file mode 100644 index 0000000..d48ffd2 --- /dev/null +++ b/cell2netcdf.m @@ -0,0 +1,122 @@ +function [] = cell2netcdf(inpt, theta, lambda) +% The function converts a cell array in Matlab into a netcdf file. +% Currently it supports daily and monthly datasets, depending on the number +% of columns of inpt: +% - 3 columns: monthly data +% - 4 columns: daily data +% Latitude and Longitude must be provided as well. If these are not known, +% the output file does not match the netcdf-conventions. For such data, use +% the function mat2netcdf. +%-------------------------------------------------------------------------- +% Input (mandatory): +% - inpt {m x 3} Cell array which contains monthly input fields. +% The first two columns must contain month and year, +% the third column must contain the (i x j) data field +% {m x 4} Cell array which contains daily input fields. 
+% The first three columns must contain day, month, and +% year, the fourth column must contain the data field +% - theta i Vector containing the latitudes of the fields +% - lambda j Vector containing the longitudes of the fields +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: January 2013 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- +fprintf('\n') +fprintf('---------------------------------------------------- \n') +fprintf('Conversion from a MATLAB-cell-array to a netcdf-file \n') +fprintf('---------------------------------------------------- \n') +fprintf(' \n') +outnme = input('Enter output filename: ', 's'); +varnme = input('Enter variable name for netcdf-file: ', 's'); +units = input('Enter units of variable: ', 's'); +longnme = input('Enter variable description: ', 's'); +mval = input('Enter identifier for missing values: ', 's'); +fprintf('---------------------------------------------------- \n') +fprintf('Computing.... ') + +% Get the size of the input cell array +% n -> number of timesteps +% p -> number of input columns +[n,p] = size(inpt); + +% Set all the missing elements to -99999 +if ~isnan(mval) + for i = 1:n + inpt{i,p}(inpt{i,p} == str2num(mval)) = -99999; + end +end + +% Compute the integer-time for the time-steps of the input dataset. +% Afterwards, Jan 15 2010 would look like 20100115. +if p == 4 % Daily dataset + dys = cell2mat(inpt(:,1)); + mnths = cell2mat(inpt(:,2)); + yrs = cell2mat(inpt(:,3)); + inttme = yrs*10000 + mnths*100 + dys; + + for i = 1:n + bigmat(:,:,i) = inpt{i,4}'; + end + + + +elseif p == 3 % Monthly dataset + mnths = cell2mat(inpt(:,1)); + yrs = cell2mat(inpt(:,2)); + inttme = yrs*10000 + mnths*100 + ones(n,1)*15; + + for i = 1:n + bigmat(:,:,i) = inpt{i,3}'; + end + + +end + +% Rearrange the cell-array in a big 3D-matrix + +% Create a netcdf-file and set the correct dimensions and dimension +% variables +ncid = netcdf.create(outnme, 'CLOBBER'); + +time_dim_id = netcdf.defDim(ncid, 'time', length(inttme)); +lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lambda)); +lat_dim_id = netcdf.defDim(ncid, 'latitude', length(theta)); + +time_var_id = netcdf.defVar(ncid, 'time', 'double', time_dim_id); +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + +% Create the data variable with the correct dimensions +data_var_id = netcdf.defVar(ncid, varnme, 'double', [lon_dim_id ... + lat_dim_id ... 
+ time_dim_id]); + + +% Set the attributes of the variables +netcdf.putAtt(ncid, time_var_id, '_CoordinateAxisType', 'Time'); + +netcdf.putAtt(ncid, lon_var_id, 'units', 'degrees_east'); +netcdf.putAtt(ncid, lon_var_id, 'long_name', 'longitude'); +netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', 'Lon'); + +netcdf.putAtt(ncid, lat_var_id, 'units', 'degrees_north'); +netcdf.putAtt(ncid, lat_var_id, 'long_name', 'latitude'); +netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', 'Lat'); + +netcdf.putAtt(ncid, data_var_id, 'units', units); +netcdf.putAtt(ncid, data_var_id, 'long_name', longnme); +netcdf.putAtt(ncid, data_var_id, 'missing_value', -99999); + +netcdf.endDef(ncid); + +% Write the variables to the netcdf-file +netcdf.putVar(ncid, time_var_id, inttme); +netcdf.putVar(ncid, lon_var_id, lambda); +netcdf.putVar(ncid, lat_var_id, theta); +netcdf.putVar(ncid, data_var_id, bigmat); + +netcdf.close(ncid); + +fprintf('Done \n') \ No newline at end of file diff --git a/cell2netcdf2.m b/cell2netcdf2.m new file mode 100644 index 0000000..4a260e0 --- /dev/null +++ b/cell2netcdf2.m @@ -0,0 +1,57 @@ +function [] = mat2netcdf(inpt, varname, lat, lon, clms, outnme, mval) + + + +time = [cell2mat(inpt(:,clms(1))) cell2mat(inpt(:,clms(2)))]; + + + + + +for i = 1:size(time,1) + + if time(i,1) < 10 + outnme_f = [outnme, num2str(time(i,2)), '0', num2str(time(i,1)), '.nc']; + else + outnme_f = [outnme, num2str(time(i,2)), num2str(time(i,1)), '.nc']; + end + + ncid = netcdf.create(outnme_f, 'NOCLOBBER'); + time_dim_id = netcdf.defDim(ncid, 'time', 1); + lon_dim_id = netcdf.defDim(ncid, 'lon', length(lon)); + lat_dim_id = netcdf.defDim(ncid, 'lat', length(lat)); + + time_var_id = netcdf.defVar(ncid, 'time', 'double', time_dim_id); + lon_var_id = netcdf.defVar(ncid, 'lon', 'double', lon_dim_id); + lat_var_id = netcdf.defVar(ncid, 'lat', 'double', lat_dim_id); + + data_var_id = netcdf.defVar(ncid, varname{1}, 'double', ... + [lon_dim_id lat_dim_id time_dim_id]); + netcdf.endDef(ncid); + + netcdf.putVar(ncid, time_var_id, 0); + netcdf.putVar(ncid, lon_var_id, lon); + netcdf.putVar(ncid, lat_var_id, lat); + netcdf.putVar(ncid, data_var_id, inpt{i,clms(3)}'); + + netcdf.reDef(ncid) + netcdf.putAtt(ncid, time_var_id, 'units', 'hours since 1'); + netcdf.putAtt(ncid, time_var_id, '_CoordinateAxisType', 'Time'); + + netcdf.putAtt(ncid, lon_var_id, 'units', 'degrees_east'); + netcdf.putAtt(ncid, lon_var_id, 'long_name', 'Longitude'); + netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', 'Lon'); + + netcdf.putAtt(ncid, lat_var_id, 'units', 'degrees_north'); + netcdf.putAtt(ncid, lat_var_id, 'long_name', 'Latitude'); + netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', 'Lat'); + + netcdf.putAtt(ncid, data_var_id, 'units', inpt{1,clms(4)}); + netcdf.putAtt(ncid, data_var_id, 'long_name', varname{2}); + netcdf.putAtt(ncid, data_var_id, 'missing_value', mval); + + netcdf.close(ncid); +end + + + \ No newline at end of file diff --git a/cellmovav.m b/cellmovav.m new file mode 100644 index 0000000..4a06cab --- /dev/null +++ b/cellmovav.m @@ -0,0 +1,61 @@ +function otpt = cellmovav(inpt, clms, mval, method) + + +if nargin < 4, method = 'wavg'; end +if nargin < 3, mval = -9999; end +if nargin < 2, clms = [3 4 8]; end + +nts = length(inpt); +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); + + +if strcmp(method, 'wavg') + dom = eomday(yrs, mnths); + + for i = 1:nts + otpt{i,1} = mnths(i); + otpt{i,2} = yrs(i); + + if i == 1 + otpt{i, 3} = (3*dom(i)*inpt{i, clms(3)} + ... 
+ dom(i+1)*inpt{i+1, clms(3)})/sum([3*dom(i) dom(i+1)]); + elseif i == nts + otpt{i, 3} = (3*dom(i)*inpt{i, clms(3)} + ... + dom(i-1)*inpt{i-1, clms(3)})/sum([3*dom(i) dom(i-1)]); + else + otpt{i, 3} = (dom(i-1)*inpt{i-1, clms(3)} + ... + 2*dom(i)*inpt{i, clms(3)} + ... + dom(i+1)*inpt{i+1, clms(3)})/sum([dom(i-1) 2*dom(i) dom(i+1)]); + end + + otpt{i, 3}(inpt{i, clms(3)} == mval) = mval; + end + +elseif strcmp(method, 'avg') + + for i = 1:nts + otpt{i,1} = mnths(i); + otpt{i,2} = yrs(i); + + if i == 1 + otpt{i, 3} = 3/4*inpt{i, clms(3)} + 1/4*inpt{i+1, clms(3)}; + elseif i == nts + otpt{i, 3} = 3/4*inpt{i, clms(3)} + 1/4*inpt{i-1, clms(3)}; + else + otpt{i, 3} = 1/4*inpt{i-1, clms(3)} + 1/2*inpt{i, clms(3)} + ... + 1/4*inpt{i+1, clms(3)}; + end + + otpt{i, 3}(inpt{i, clms(3)} == mval) = mval; + end + +end + + + + + + + + \ No newline at end of file diff --git a/cells2database.m b/cells2database.m new file mode 100644 index 0000000..1e8d3a1 --- /dev/null +++ b/cells2database.m @@ -0,0 +1,76 @@ +function otpt = cells2database(inpt, syr, smnth); + +fprintf('\n') +fprintf('---------------------------------------------------- \n') +fprintf('Convert a "unordered" cell array to a data array \n') +fprintf('---------------------------------------------------- \n') +fprintf(' \n') +cntrnme = input('Data center: ', 's'); +varnme = input('Variable name: ', 's'); + +units = input('Enter units of variable: ', 's'); +dly = input('Daily dataset?: ', 's'); +fprintf('---------------------------------------------------- \n') + + +mnth = smnth; +yr = syr; +N = length(inpt); + +if strcmp(dly, 'y') + + day = 1; + for i = 1:N + otpt{i,1} = cntrnme; + otpt{i,2} = varnme; + otpt{i,3} = day; + otpt{i,4} = mnth; + otpt{i,5} = yr; + otpt{i,6} = 'Global'; + otpt{i,7} = 89.75:-0.5:-89.75; + otpt{i,8} = -179.75:0.5:179.75; + otpt{i,9} = inpt{i}; + otpt{i,10} = units; + + nr_days = eomday(yr, mnth) + day = day + 1; + + if day > nr_days + day = 1; + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + end + end + +else + + for i = 1:N + otpt{i,1} = cntrnme; + otpt{i,2} = varnme; + otpt{i,3} = mnth; + otpt{i,4} = yr; + otpt{i,5} = 'Global'; + otpt{i,6} = 89.75:-0.5:-89.75; + otpt{i,7} = -179.75:0.5:179.75; + + for j = 1:size(inpt,2) + otpt{i,j+7} = inpt{i,j}; + end + otpt{i,j+8} = units; + + mnth = mnth + 1; + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + end +end + + + + + diff --git a/celltrnd.m b/celltrnd.m new file mode 100644 index 0000000..9199470 --- /dev/null +++ b/celltrnd.m @@ -0,0 +1,71 @@ +function [a, b, varargout] = celltrnd(inpt, mask, mval, clms, method) + + +if size(clms) == 1 + findx = clms(1); +elseif size(clms) == 2 + findx = clms(2); +else + findx = clms(3); +end + +[r, c] = size(inpt{1, findx}); + +[F, c_indx] = cell2catchmat(inpt(:, findx), mask); + + +[nts, npxls] = size(F); +% F = F - ones(nts, 1)*mean(F); +t = (0:nts-1)'; + +A = [ones(nts, 1) t]; + +if strcmp(method, 'daywghts') & size(clms) == 3 + mnths = cell2mat(inpt(:, clms(1))); + yrs = cell2mat(inpt(:, clms(2))); + + dom = eomday(yrs, mnths); + P = diag(dom); + +elseif strcmp(method, 'daywghts') & size(clms) == 2 + + yrs = cell2mat(inpt(:, clms(1))); + + for i = 1:length(yrs) + doy(i) = sum(eomday(yrs(i), 1:12)); + end + + P = diag(doy); + +else + P = eye(nts); +end + + + +for i = 1:npxls + + N = inv(A'*P*A); + xht = N*A'*P*F(:,i); + + a_vec(i) = xht(1); + b_vec(i) = xht(2); + + sigma_aa_vec(i) = N(1,1); + sigma_bb_vec(i) = N(2,2); + sigma_ab_vec(i) = N(1,2); + + +end + +a = catchmat2cell(a_vec, c_indx, r, c, mval); +b = 
catchmat2cell(b_vec, c_indx, r, c, mval); + +if nargout > 2 + varargout{1} = catchmat2cell(sigma_aa_vec, c_indx, r, c, mval); + varargout{2} = catchmat2cell(sigma_bb_vec, c_indx, r, c, mval); + varargout{3} = catchmat2cell(sigma_ab_vec, c_indx, r, c, mval); +end + + + diff --git a/center_ts.m b/center_ts.m new file mode 100644 index 0000000..492eee9 --- /dev/null +++ b/center_ts.m @@ -0,0 +1,103 @@ +function [otpt, A, x, comps] = center_ts(inpt, method, mval, mnswitch) + +if nargin < 4, mnswitch = 1; end +if nargin < 3, mval = -9999; end +if nargin < 2, method = 4; end + +[nlats, nlons] = size(inpt{1}); +nts = length(inpt); + +for i = 1:nts + F(i,:) = inpt{i}(:); +end + +c_m = find(F(1,:) == mval); +c_v = find(F(1,:) ~= mval); + +otpt_t = F; +otpt_t(:, c_m) = []; + + +if mnswitch + mn = mean(otpt_t, 1); + otpt_t = otpt_t - ones(nts, 1)*mn; +end + +[nts, ngrd] = size(F); + +t = (0:nts-1)'; +if method == 1 + A = ones(nts, 1); +elseif method == 2 + A = [ones(nts, 1) t]; +elseif method == 3 + A = [ones(nts,1) t cos(pi/6*t) sin(pi/6*t)]; +elseif method == 4 + A = [ones(nts,1) t cos(pi/6*t) sin(pi/6*t) cos(pi/3*t) sin(pi/3*t)]; +elseif method == 5 + otpt_t = detrend(otpt_t); + A = [cos(pi/6*t) sin(pi/6*t) cos(pi/3*t) sin(pi/3*t) cos(pi/2*t) sin(pi/2*t)]; +end + +for i = 1:size(otpt_t, 2) + x(:,i) = inv(A'*A)*A'*otpt_t(:, i); +end + +rem = A*x; + +ncomps = size(x, 1); +comps = zeros(ncomps, ngrd); +for i = 1:ncomps + tmp = x(i,:); + comps(i, c_m) = mval; + comps(i, c_v) = tmp; +end + +otpt = zeros(nts, ngrd); +otpt(:, c_m) = mval; +otpt(:, c_v) = otpt_t - rem; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/centerts.m b/centerts.m new file mode 100644 index 0000000..3d636e0 --- /dev/null +++ b/centerts.m @@ -0,0 +1,23 @@ +function [rsdl, estts, mn, trnd, yrcle] = centerts(inpt) + +n = length(inpt); + +% Remove the mean +A = ones(length(inpt), 1); +mn = 1/n*(A'*inpt); + +cntr = inpt - mn; + +% Estimate the trend and the annual cycle +tmp = eye(12); +A = [(1:n)' repmat(tmp, [n/12, 1])]; +xht = A\cntr; +estts = A*xht + mn; + +rsdl = inpt - estts; +trnd = xht(1); +yrcle = xht(2:end); + + + + diff --git a/central_diff.m b/central_diff.m new file mode 100644 index 0000000..471681d --- /dev/null +++ b/central_diff.m @@ -0,0 +1,83 @@ +function ddt = cdiffcell(inpt, varargin); +% The function computes the first derivative of a given input dataset inpt +% according to the method of central differences. For the first (last) +% field, the derivative is computed according to forward (backward) +% differences. +%-------------------------------------------------------------------------- +% Input: inpt {m x n} Cell array which contains the input +% fields. 
+% clms [1 x 3] column indices of the input containing +% month, year and the corresponding field +% [1 x 1] column index of the input fields +% miss [1 x 1] value of undefined elements in the +% input fields +% id_map [i x j] Map which defines the different areas +% area_id [1 x k] Vector (or scalar) which contains the +% ids of the desired areas +% +% Output: otpt [m x k] matrix containing the area-weighted +% means +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: area_wghts.m +%-------------------------------------------------------------------------- + + +% Checking input arguments and setting default values +pp = inputParser; +pp.addRequired('inpt', @(x)iscell(x)); % Input dataset (cell) +pp.addOptional('clms', [4 5 9], @(x) isnumeric(x)); % Columns of m/y/flds +pp.addOptional('time', [0 0 0 0], @(x) isnumeric(x)); % start and end time +pp.parse(inpt, varargin{:}) + +clms = pp.Results.clms; +time = pp.Results.time; + +keyboard + + +if size(clms) == [1 1] % No time information is available + + indx_c = (1:length(inpt))'; + indx_b = [1; indx_c(1:end-1)]; + indx_f = [indx_c(2:end); indx_c(end)]; + + div = [1; ones(length(indx_c)-2,1)*2; 1]; + +else + + mnth = cell2mat(inpt(:, clms(1))); + yr = cell2mat(inpt(:, clms(2))); + flds = inpt(:,clms(3)); + + sind = find((mnth) == time(1) & yr == time(2)) + eind = find((mnth) == time(3) & yr == time(4)); + +end +keyboard + + + + + +fields = inpt(sind:eind, clms); +n = length(fields); + + +for i = 1:n + ddt{i,1} = fields{i,1}; + ddt{i,2} = fields{i,2}; + if i == 1 + ddt{i,3} = fields{i+1,3} - fields{i,3}; + elseif i == n + ddt{i,3} = fields{i,3} - fields{i-1,3}; + else + ddt{i,3} = 1/2*(fields{i+1,3} - fields{i-1,3}); + end +end + + + + diff --git a/central_diff2d.m b/central_diff2d.m new file mode 100644 index 0000000..b0b927c --- /dev/null +++ b/central_diff2d.m @@ -0,0 +1,31 @@ +function [dx, dy] = central_diff2d(inpt); +% The function computes the first derivative of a given input dataset +% according to the method of central differences. + +dx = zeros(size(inpt)); +dy = zeros(size(inpt)); + +tmp1 = [inpt(:, end) inpt(:,1:end-1)]; +tmp2 = [inpt(:, 2:end) inpt(:,1)]; + +dx = (tmp2 - tmp1)/2; + +clear tmp1 tmp2 + +tmp1 = inpt(1:end-2,:); +tmp2 = inpt(3:end,:); + +dy(2:end-1,:) = (tmp2-tmp1)/2; + +dy(1,:) = inpt(2,:) - inpt(1,:); +dy(end,:) = inpt(end,:) - inpt(end-1,:); + + + + + + + + + + diff --git a/chkfrmt.m b/chkfrmt.m new file mode 100755 index 0000000..b142e85 --- /dev/null +++ b/chkfrmt.m @@ -0,0 +1,141 @@ +function [frmt, lmx, otpt] = chkfrmt(inp, otptfrmt) + +% The function checks the format of the sh-input data inp, which can be a +% vector or a matrix. 
If the argument otptfrmt is given, the input data is +% rearranged in the specified output format +%-------------------------------------------------------------------------- +% Input: inp matrix in c\s or s|c format or colombo ordered +% vector with sh-coefficients +% otptfrmt desired output format of the input coefficients: +% - 'cs' -> inp is rearranged to the c\s format +% - 'sc' -> inp is rearranged to the s|c format +% - 'rvec' -> inp is rearranged to a colombo ordered +% row-vector +% - 'cvec' -> inp is rearranged to a colombo ordered +% column-vector +% - 'fmat' -> inp is a full (lmx+1)²+(lmx+1)² matrix +% if otptfrmt is not defined, the output format +% will be the same as the format of the input data +% +% Output frmt format of the input data +% - 'cs' -> inp is in c\s format +% - 'sc' -> inp is in s|c format +% - 'rvec' -> inp is a colombo ordered row-vector +% - 'cvec' -> inp is a colombo ordered column-vector +% +% lmx maximal degree of expansion of the input data +% +% otpt optional output data, which was rearranged +% according to the parameter otptfrmt +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 10.01.2010 +%-------------------------------------------------------------------------- +% Uses: sc2cs.m, cs2sc.m, sh_sort.m, mat2vec.m +%-------------------------------------------------------------------------- +[r, c] = size(inp); + +if r == c && r <= 120 + % inp is in c\s format + frmt = 'cs'; + lmx = r - 1; +elseif r > 1 && c == 1 + % inp is a row-vector + frmt = 'rvec'; + lmx = sqrt(r) - 1; +elseif r == 1 && c > 1 + % inp is a column-vector + frmt = 'cvec'; + lmx = sqrt(c) - 1; +elseif c == 2*r - 1 + % inp is in s|c format + frmt = 'sc'; + lmx = r - 1; +elseif r == c && r > 120 + % inp is a full (lmx+1)²+(lmx+1)² matrix + frmt = 'fmat'; + lmx = sqrt(r) - 1; +else + error('Input data has unknown format!') +end + +if nargin < 2 + otptfrmt = frmt; +end + +if nargout > 2 + + if strcmp(otptfrmt,frmt) + otpt = inp; + + % Output format: c\s + elseif strcmp(otptfrmt, 'cs') + + if strcmp(frmt, 'sc') + otpt = sc2cs(inp); + elseif strcmp(frmt, 'rvec') + otpt = sh_sort(inp, 'cs'); + elseif strcmp(frmt, 'cvec') + otpt = inp'; + otpt = sh_sort(otpt, 'cs'); + elseif strcmp(frmt, 'fmat') + otpt = sh_sort(diag(inp), 'cs'); + end + + % Output format: row-vector + elseif strcmp(otptfrmt, 'rvec') + + if strcmp(frmt, 'cs') + otpt = mat2vec(inp); + elseif strcmp(frmt, 'sc') + otpt = sc2cs(inp); + otpt = mat2vec(otpt); + elseif strcmp(frmt, 'cvec') + otpt = inp'; + elseif strcmp(frmt, 'fmat') + otpt = diag(inp); + end + + % Output format: column-vector + elseif strcmp(otptfrmt, 'cvec') + + if strcmp(frmt, 'cs') + otpt = mat2vec(inp); + otpt = otpt'; + elseif strcmp(frmt, 'sc') + otpt = sc2cs(inp); + otpt = mat2vec(otpt); + otpt = otpt'; + elseif strcmp(frmt, 'rvec') + otpt = inp'; + elseif strcmp(frmt, 'fmat') + otpt = diag(inp)'; + end + + % Output format: s|c-format + elseif strcmp(otptfrmt, 'sc') + + if strcmp(frmt, 'cs') + otpt = cs2sc(inp, 0); + elseif strcmp(frmt, 'rvec') + otpt = sh_sort(inp, 'sc'); + elseif strcmp(frmt, 'cvec') + otpt = sh_sort(inp', 'sc'); + elseif strcmp(frmt, 'fmat') + otpt = sh_sort(diag(inp), 'sc'); + end + % Output format: full matrix + elseif strcmp(otptfrmt, 'fmat') + if strcmp(frmt, 'cs') + otpt = diag(mat2vec(inp)); + elseif strcmp(frmt, 'rvec') || strcmp(frmt, 'cvec') + otpt = diag(inp); + elseif strcmp(frmt, 'sc') + otpt = diag(mat2vec(sc2cs(inp))); + end + + end +end + + \ No newline at end 
of file diff --git a/ciplot.m b/ciplot.m new file mode 100644 index 0000000..0aa9318 --- /dev/null +++ b/ciplot.m @@ -0,0 +1,38 @@ +function ciplot(lower,upper,x,colour); + +% ciplot(lower,upper) +% ciplot(lower,upper,x) +% ciplot(lower,upper,x,colour) +% +% Plots a shaded region on a graph between specified lower and upper confidence intervals (L and U). +% l and u must be vectors of the same length. +% Uses the 'fill' function, not 'area'. Therefore multiple shaded plots +% can be overlayed without a problem. Make them transparent for total visibility. +% x data can be specified, otherwise plots against index values. +% colour can be specified (eg 'k'). Defaults to blue. + +% Raymond Reynolds 24/11/06 + +if length(lower)~=length(upper) + error('lower and upper vectors must be same length') +end + +if nargin<4 + colour='b'; +end + +if nargin<3 + x=1:length(lower); +end + +% convert to row vectors so fliplr can work +if find(size(x)==(max(size(x))))<2 +x=x'; end +if find(size(lower)==(max(size(lower))))<2 +lower=lower'; end +if find(size(upper)==(max(size(upper))))<2 +upper=upper'; end + +fill([x fliplr(x)],[upper fliplr(lower)],colour) + + diff --git a/colorgrad.m b/colorgrad.m new file mode 100644 index 0000000..bcee432 --- /dev/null +++ b/colorgrad.m @@ -0,0 +1,53 @@ +% +% Copyright (C) 2011-2012 Alex Bikfalvi +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 3 of the License, or (at +% your option) any later version. + +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. + +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+% + +function c = colorgrad(varargin) + +n = 16; +t = 'blue_down'; + +switch length(varargin) + case 1 + n = varargin{1}; + case 2 + n = varargin{1}; + t = varargin{2}; +end + +switch lower(t) + case 'blue_up' + c = cat(2,linspace(0,0.6,n)',linspace(0.2,0.8,n)',linspace(0.6,1,n)'); + case 'blue_down' + c = cat(2,linspace(0.6,0,n)',linspace(0.8,0.2,n)',linspace(1,0.6,n)'); + case 'orange_up' + c = cat(2,linspace(1,248/255,n)',linspace(0.6,224/255,n)',linspace(0,124/255,n)'); + case 'orange_down' + c = cat(2,linspace(248/255,1,n)',linspace(224/255,0.6,n)',linspace(124/255,0,n)'); + case 'green_up' + c = cat(2,linspace(0.2,0.6,n)',linspace(0.6,1,n)',linspace(0.2,0.6,n)'); + case 'green_down' + c = cat(2,linspace(0.6,0.2,n)',linspace(1,0.6,n)',linspace(0.6,0.2,n)'); + case 'red_up' + c = cat(2,linspace(.8,1,n)',linspace(.2,.6,n)',linspace(.2,.6,n)'); + case 'red_down' + c = cat(2,linspace(1,.8,n)',linspace(.6,.2,n)',linspace(.6,.2,n)'); + otherwise + error('No such color gradient.'); +end + +end \ No newline at end of file diff --git a/comp_cont_corr.m b/comp_cont_corr.m new file mode 100644 index 0000000..06f3495 --- /dev/null +++ b/comp_cont_corr.m @@ -0,0 +1,133 @@ +function [R E sig] = comp_glob_corr(inpt1, inpt2, time, tscale, contindx, clms, mval) + +% The function computes the area averaged mean over the continents (cswitch = 1), +% over the oceans (cswitch = 2) + +% if nargin < 6 +% mval = -9999; +% end +% +% if nargin < 5 +% clms = [4 5 9]; +% end +% +% if nargin < 4 +% tscale = 'complete'; +% end +% +% if nargin < 3 +% cswitch = 0; +% end + + +sindx1 = find(cell2mat(inpt1(:,clms(1))) == 1 & ... + cell2mat(inpt1(:,clms(2))) == time(1)); +eindx1 = find(cell2mat(inpt1(:,clms(1))) == 12 & ... + cell2mat(inpt1(:,clms(2))) == time(2)); + +sindx2 = find(cell2mat(inpt2(:,clms(1))) == 1 & ... + cell2mat(inpt2(:,clms(2))) == time(1)); +eindx2 = find(cell2mat(inpt2(:,clms(1))) == 12 & ... + cell2mat(inpt2(:,clms(2))) == time(2)); + +fields = cell(eindx1-sindx1+1, 4); +fields(:, 1:3) = inpt1(sindx1:eindx1, clms); +fields(:, 4) = inpt2(sindx2:eindx2, clms(3)); +clear inpt* + +if strcmp(mval, 'NaN') + for i = 1:length(fields) + fields{i,3}(isnan(fields3{i,3})) = -9999; + fields{i,4}(isnan(fields3{i,3})) = -9999; + end + mval = -9999; +end + +A = area_wghts(0.25:0.5:179.75, 0.5); +A = A'*ones(1,720); + +% Computing a mask for the different setups +mask = zeros(360, 720); +load continents.asc + +if strcmp(tscale, 'complete') + R = zeros(length(fields), 2 + length(contindx)); + R(:,1) = cell2mat(fields(:,1)); + R(:,2) = cell2mat(fields(:,2)); + E = R; + sig = R; + + for i = 1:length(contindx) + mask = zeros(360, 720); + mask(continents == contindx(i)) = 1; + + [R(:,i+2) E(:,i+2), sig(:,i+2)] = cellfun(@(x,y) spat_agg_corr(x,y, ... + mask, mval, A), fields(:,3), fields(:,4)); + end + +elseif strcmp(tscale, 'monthly') + + R = zeros(12, length(contindx) + 1); + R(:,1) = 1:12; + + E = R; + sig = R; + + for i = 1:length(contindx) + mask = zeros(360, 720); + mask(continents == contindx(i)) = 1; + + for j = 1:12 + indx = find(cell2mat(fields(:,1)) == j); + [tmp_r tmp_e tmp_sig] = cellfun(@(x,y) spat_agg_corr(x,y, ... 
+ mask, mval, A), fields(indx, 3), fields(indx, 4)); + R(j,i+1) = mean(tmp_r); + E(j,i+1) = mean(tmp_e); + R(j,i+1) = mean(tmp_sig); + end + end + +elseif strcmp(tscale, 'annual') + + j_indx = 1:12:length(fields); + R = zeros(length(j_indx), length(contindx) + 1); + E = R; + sig = R; + + for i = 1:length(contindx) + mask = zeros(360, 720); + mask(continents == contindx(i)) = 1; + + R(:,1) = fields{1,2}:1:fields{end,2}; + + for j = 1:length(j_indx) + indx = find(cell2mat(fields(:,2)) == fields{j_indx(j),2}); + [tmp_r tmp_e tmp_sig] = cellfun(@(x,y) spat_agg_corr(x,y, ... + mask, mval, A), fields(indx, 3), fields(indx, 4)); + R(j,i+1) = mean(tmp_r); + E(j,i+1) = mean(tmp_e); + sig(j,i+1) = mean(tmp_sig); + end + end + +elseif strcmp(tscale, 'monthly_s') + % Single monthly output does only allow one continent + R = zeros(12, length(fields)/12 + 1); + R(:,1,1) = 1:12; + + E = R; + sig = R; + + mask = zeros(360, 720); + mask(continents == contindx) = 1; + + for j = 1:12 + indx = find(cell2mat(fields(:,1)) == j); + [tmp_r tmp_e tmp_sig] = cellfun(@(x,y) spat_agg_corr(x,y, ... + mask, mval, A), fields(indx, 3), fields(indx, 4)); + R(j,2:end) = tmp_r'; + E(j,2:end) = tmp_e'; + sig(j,2:end) = tmp_sig'; + end +end + diff --git a/comp_cont_quant.m b/comp_cont_quant.m new file mode 100644 index 0000000..26daa96 --- /dev/null +++ b/comp_cont_quant.m @@ -0,0 +1,193 @@ +function glob_val = comp_cont_quant(inpt, time, tscale, cindx, clms, mval, dimswitch) + +% if nargin < 5 +% mval = -9999; +% end +% +% if nargin < 4 +% clms = [4 5 9]; +% end +% +% if nargin < 3 +% tscale = 'complete'; +% end +% +% if nargin < 2 +% cswitch = 0; +% end + + +sind = find(cell2mat(inpt(:,clms(1))) == 1 & ... + cell2mat(inpt(:,clms(2))) == time(1)); +eind = find(cell2mat(inpt(:,clms(1))) == 12 & ... + cell2mat(inpt(:,clms(2))) == time(2)); +fields = inpt(sind:eind, clms); + +if strcmp(mval, 'NaN') + for i = 1:length(fields) + fields{i,3}(isnan(fields{i,3})) = -9999; + end + mval = -9999; +end + +load continents.asc + +% Computing the area of the pixels +A = area_wghts(0.25:0.5:179.75, 0.5); +A = A'*ones(1,720); + +% Computing a mask for the different setups +mask_t = gen_mask(cswitch); + + +if strcmp(tscale, 'complete') + + if dimflg == 1 + nrd = cellfun(@(x,y) daysinmonth(x,y), fields(:,1), fields(:,2)); + else + nrd = 1; + end + + s_agg(:,1) = cell2mat(fields(:,1)); + s_agg(:,2) = cell2mat(fields(:,2)); + + s_agg(:,3) = cellfun(@(x) spat_agg(x, mask, mval, A), fields(:,3)); + s_agg(:,3) = s_agg(:,3)./nrd; + + + +elseif strcmp(tscale, 'monthly') + + glob_val = zeros(12, 13); + glob_val(:,1) = 1:12; + + j_indx = 1:12:length(fields); + + for i = 1:12 + + % Loop over all continents + for j = 1:length(j_indx) + + if dimswitch == 1 + nrd = daysinmonth(fields{j_indx(j)+i-1,1}, ... + fields{j_indx(j)+i-1,2}); + else + nrd = 1; + end + + for k = 1:length(cindx) + + mask = mask_t; + mask(continents == cindx(k)) = 1; + mask(fields{j_indx(j)+i-1,3} == mval) = 0; + + ar_wts = A.*mask; + ar_tot = sum(sum(ar_wts)); + + tmp = fields{j_indx(j)+i-1,3}.*ar_wts; + glob_val(i,k+1) = glob_val(i,k+1) + sum(sum(tmp))/(nrd*ar_tot); + + end + end + glob_val(i,:) = glob_val(i,:)/length(j_indx); + end + +elseif strcmp(tscale, 'annual') + + j_indx = 1:12:length(fields); + glob_val = zeros(length(j_indx), 13); + + for i = 1:length(j_indx) + glob_val(i,1) = fields{j_indx(i),2}; + + for j = 1:12 + + if dimswitch == 1 + nrd = daysinmonth(fields{j_indx(i)+j-1,1}, ... 
+ fields{j_indx(i)+j-1,2}); + else + nrd = 1; + end + + for k = 1:length(cindx) + mask = mask_t; + mask(continents == cindx(k)) = 1; + mask(fields{j_indx(i)+j-1,3} == mval) = 0; + + ar_wts = A.*mask; + ar_tot = sum(sum(ar_wts)); + + tmp = fields{j_indx(i)+j-1,3}.*ar_wts; + glob_val(i,k+1) = glob_val(i,k+1) + sum(sum(tmp))/(ar_tot*nrd); + end + end + glob_val(i, 2:end) = glob_val(i, 2:end)/12; + + + end + + + + + + + + + + +elseif strcmp(tscale, 'seasonal') + glob_val = zeros(4, 12); + j_indx = 1:12:length(fields); % Index of all Januaries + n = length(j_indx); + sns = [1 2 3; 4 5 6; 7 8 9; 10 11 12]; + + for i = 1:4 % Loop over the seasons + + for j = 1:n % Loop over the years + + for l = 1:3 % Loop over the months + if dimswitch == 1 + nrd = daysinmonth(fields{j_indx(j)+sns(i,l)-1,1}, ... + fields{j_indx(j)+sns(i,l)-1,2}); + else + nrd = 1; + end + + for k = 1:12 % Loop over the continents + mask = mask_t; + mask(fields{j_indx(j)+sns(i,l)-1,3} == mval) = 0; + mask(continents == k-1) = 1; + + ar_wts = A.*mask; + ar_tot = sum(sum(ar_wts)); + + tmp = fields{j_indx(j)+sns(i,l)-1,3}.*ar_wts; + + glob_val(i,k) = glob_val(i,k) + sum(sum(tmp))/(ar_tot*nrd); + end + end + end + end + glob_val = glob_val/n; + +end + + + + + + + + + + + + + + + + + + + + diff --git a/comp_euro_prec.m b/comp_euro_prec.m new file mode 100644 index 0000000..12bb202 --- /dev/null +++ b/comp_euro_prec.m @@ -0,0 +1,190 @@ +% Comparison of annual precipitation in Europe +% clear all +close all +% +% % loading the datasets +% load /home/lorenz-c/Data/Precipitation/E-Obs/E-OBS_PREC.mat +% load /home/lorenz-c/Data/Precipitation/MERRA/MERRA_PREC.mat +% load /home/lorenz-c/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% load /home/lorenz-c/Data/Precipitation/GPCC/GPCC_PREC.mat +% load /home/lorenz-c/Data/Precipitation/CRU3/CRU3_PREC.mat +% % load /home/lorenz-c/Data/Precipitation/DEL/DEL_PREC.mat +% load /home/lorenz-c/Data/Precipitation/CFSR/CFSR_PREC.mat +% % +% load /home/lorenz-c/Data/colormaps/precip_cmap.mat +% load /home/lorenz-c/Data/colormaps/rel_precip_cmap.mat + +k = 1; +% % Computing the total annual precipitation of each dataset +% +longi = eobs_prec{1,8}; +lati = eobs_prec{1,7}; +load coast +% % +% % +setname{1} = 'GPCC'; +setname{2} = 'E-OBS'; +setname{3} = 'CRU3'; +% setname{3} = 'DEL'; +setname{4} = 'ECMWF'; +setname{5} = 'MERRA'; +setname{6} = 'CFSR'; + + + + +year = 2000; +k = 1; +for i = 1:7 + + r_eobs = find(cell2mat(eobs_prec(:,5)) == year); + r_merra = find(cell2mat(merra_prec(:,5)) == year); + r_ecmwf = find(cell2mat(ecmwf_prec(:,5)) == year); + r_gpcc = find(cell2mat(gpcc_prec(:,4)) == year); + r_cfsr = find(cell2mat(cfsr_prec(:,5)) == year); + r_cru = find(cell2mat(cru3_prec(:,5)) == year); + + + merra{k,1} = zeros(101,232); + ecmwf{k,1} = zeros(101,232); + cru{k,1} = zeros(101,232); + gpcc{k,1} = zeros(101,232); + cfsr{k,1} = zeros(101,232); + eobs{k,1} = zeros(101,232); + + + % Datasets with monthly values + for j = 1:12 + cfsr{k,1} = cfsr{k,1} + cfsr_prec{r_cfsr(j),9}(30:130, 280:511); + merra{k,1} = merra{k,1} + merra_prec{r_merra(j),9}(30:130, 280:511); + ecmwf{k,1} = ecmwf{k,1} + ecmwf_prec{r_ecmwf(j),9}(30:130, 280:511); + cru{k,1} = cru{k,1} + cru3_prec{r_cru(j),9}(30:130, 280:511); + gpcc{k,1} = gpcc{k,1} + gpcc_prec{r_gpcc(j),8}(30:130, 280:511); +% del{k,1} = del{k,1} + del_prec{r_del(j),9}(30:130, 280:511); + end + + % Datasets with daily values + msk = zeros(101,232); + tmp_msk = zeros(101,232); + + for j = 1:length(r_eobs) + tmp_msk(eobs_prec{r_eobs(j),9}~=-9999) = 1; + msk = msk + tmp_msk; + 
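+        % Accumulate the daily E-OBS fields over the year; the logical
+        % indexing below adds only those grid cells where the current daily
+        % field contains valid (non -9999) data.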
eobs{k,1}(eobs_prec{r_eobs(j),9}~=-9999) = ... + eobs{k,1}(eobs_prec{r_eobs(j),9}~=-9999) + ... + eobs_prec{r_eobs(j),9}(eobs_prec{r_eobs(j),9}~=-9999); + end + + +% eobs{k,1} = eobs{k,1}./msk; + eobs{k,1}(msk 0 + load continents.asc +end + + + +sind = find(cell2mat(inpt(:,clms(1))) == 1 & ... + cell2mat(inpt(:,clms(2))) == time(1)); +eind = find(cell2mat(inpt(:,clms(1))) == 12 & ... + cell2mat(inpt(:,clms(2))) == time(2)); +fields = inpt(sind:eind, clms); + + +if strcmp(mval, 'NaN') + for i = 1:length(fields) + fields{i,3}(isnan(fields{i,3})) = -9999; + end + mval = -9999; +end + + +A = area_wghts(0.25:0.5:179.75, 0.5); +A = A'; +A = A*ones(1,720); + + + + +% Computing a mask for the different setups +mask = gen_mask(cswitch); + +if strcmp(tscale, 'complete') + + if dimflg == 1 + nrd = cellfun(@(x,y) daysinmonth(x,y), fields(:,1), fields(:,2)); + else + nrd = 1; + end + + s_agg(:,1) = cell2mat(fields(:,1)); + s_agg(:,2) = cell2mat(fields(:,2)); + + s_agg(:,3) = cellfun(@(x) spat_agg(x, mask, mval, A), fields(:,3)); + s_agg(:,3) = s_agg(:,3)./nrd; + + +elseif strcmp(tscale, 'monthly') + + mnths = cell2mat(fields(:,1)); + + s_agg = zeros(12, 2); + + for i = 1:12 + + indx = find(mnths == i); + + if dimflg == 1 + nrd = cellfun(@(x,y) daysinmonth(x,y), fields(indx,1), ... + fields(indx,2)); + else + nrd = 1; + end + + tmp = cellfun(@(x) spat_agg(x, mask, mval, A), fields(indx,3)); + + s_agg(i,1) = i; + s_agg(i,2) = mean(tmp./nrd); + + end + +elseif strcmp(tscale, 'annual') + + yrs = cell2mat(fields(:,2)); + yrs_t = unique(yrs); + + for i = 1:length(yrs_t) + indx = find(yrs == yrs_t(i)); + + if dimflg == 1 + nrd = cellfun(@(x,y) daysinmonth(x,y), fields(indx,1), ... + fields(indx,2)); + nrd = sum(nrd); + else + nrd = 1; + end + + tmp = cellfun(@(x) spat_agg(x, mask, mval, A), fields(indx,3)); + s_agg(i,1) = yrs_t(i); + s_agg(i,2) = mean(tmp./nrd); + end +end + + + + + + + + + + + + + +% elseif strcmp(tscale, 'seasonal_1') +% j_indx = 1:12:length(fields); % Index of all Januaries +% n = length(j_indx); +% if cswitch < 6 +% glob_val = zeros(length(j_indx), 4); +% +% for i = 1:4 +% +% for j = 1:n +% mask = mask_t; +% tmp = zeros(360, 720); +% for k = 1:3 +% nrd = daysinmonth(fields{j_indx(i)+j-1,1}, ... +% fields{j_indx(i)+j-1,2}); +% mask(fields{j_indx(i+k-1),3} == mval) = 0; +% tmp = tmp + mask.*fields{j_indx(i+k-1),3}/nrd; +% end +% ar_wts = A.*mask; +% ar_tot = sum(sum(ar_wts)); +% +% glob_val(j,i) = sum(sum(ar_wts.*tmp))/ar_tot; +% end +% j_indx = j_indx + 3; +% end +% elseif cswitch == 6 +% +% for i = 1:4 +% +% end +% +% +% +% +% end +% end +% +% +% +% +% + + + + + + + + + + + + + + + + + + + + + diff --git a/comp_glob_t2.m b/comp_glob_t2.m new file mode 100644 index 0000000..3d8cb45 --- /dev/null +++ b/comp_glob_t2.m @@ -0,0 +1,121 @@ +load /home/lorenz-c/Data/Temperature/T2/CRU3/CRU_T2.mat +sind = find(cell2mat(cru_t2(:,4)) == 1 & ... + cell2mat(cru_t2(:,5)) == 1989); +eind = find(cell2mat(cru_t2(:,4)) == 12 & ... + cell2mat(cru_t2(:,5)) == 2006); + +cru = cru_t2(sind:eind, [4 5 9]); +clear cru_t2 sind eind + +load /home/lorenz-c/Data/Temperature/T2/DEL/DEL_T2.mat +sind = find(cell2mat(del_t2(:,4)) == 1 & ... + cell2mat(del_t2(:,5)) == 1989); +eind = find(cell2mat(del_t2(:,4)) == 12 & ... + cell2mat(del_t2(:,5)) == 2006); + +del = del_t2(sind:eind, [4 5 9]); +clear del_t2 sind eind + +load /home/lorenz-c/Data/Temperature/T2/ECMWF/ECMWF_T2.mat +sind = find(cell2mat(ecmwf_t2(:,4)) == 1 & ... + cell2mat(ecmwf_t2(:,5)) == 1989); +eind = find(cell2mat(ecmwf_t2(:,4)) == 12 & ... 
+ cell2mat(ecmwf_t2(:,5)) == 2006); + +ecmwf = ecmwf_t2(sind:eind, [4 5 9]); +clear ecmwf_t2 sind eind + + +load /home/lorenz-c/Data/Temperature/T2/MERRA/MERRA_T2.mat +sind = find(cell2mat(merra_t2(:,4)) == 1 & ... + cell2mat(merra_t2(:,5)) == 1989); +eind = find(cell2mat(merra_t2(:,4)) == 12 & ... + cell2mat(merra_t2(:,5)) == 2006); + +merra = merra_t2(sind:eind, [4 5 9]); +clear merra_t2 sind eind + +load /home/lorenz-c/Data/Temperature/T2/CFSR/CFSR_T2.mat +sind = find(cell2mat(cfsr_t2(:,4)) == 1 & ... + cell2mat(cfsr_t2(:,5)) == 1989); +eind = find(cell2mat(cfsr_t2(:,4)) == 12 & ... + cell2mat(cfsr_t2(:,5)) == 2006); + +cfsr = cfsr_t2(sind:eind, [4 5 9]); +clear cfsr_t2 sind eind + + +cru_mn = zeros(360, 720); +del_mn = zeros(360, 720); +ecmwf_mn = zeros(360, 720); +merra_mn = zeros(360, 720); +cfsr_mn = zeros(360, 720); + +mask_cru = zeros(360, 720); +mask_del = zeros(360, 720); + + +for i = 1:length(cru) + cru_mn = cru_mn + cru{i,3}; + mask_cru(cru{i,3} ~= -9999) = mask_cru(cru{i,3} ~= -9999) + 1; + + del_mn = del_mn + del{i,3}; + mask_del(del{i,3} ~= -9999) = mask_del(del{i,3} ~= -9999) + 1; + + ecmwf_mn = ecmwf_mn + ecmwf{i,3}; + merra_mn = merra_mn + merra{i,3}; + cfsr_mn = cfsr_mn + cfsr{i,3}; +end + +mask_total = zeros(360, 720); + +mask_cru(mask_cru < 216) = 0; +mask_del(mask_del < 216) = 0; + +mask_total(mask_cru == 216 & mask_del == 216) = 216; + +cru_mn = cru_mn./mask_total; +del_mn = del_mn./mask_total; +ecmwf_mn = ecmwf_mn./mask_total; +merra_mn = merra_mn./mask_total; +cfsr_mn = cfsr_mn./mask_total; + +cru_mn(mask_total == 0) = NaN; +del_mn(mask_total == 0) = NaN; +ecmwf_mn(mask_total == 0) = NaN; +merra_mn(mask_total == 0) = NaN; +cfsr_mn(mask_total == 0) = NaN; + +d_del = del_mn - cru_mn; +d_ecmwf = ecmwf_mn - cru_mn; +d_merra = merra_mn - cru_mn; +d_cfsr = cfsr_mn - cru_mn; + + +A = grid2gmt(cru_mn, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Absolute/cru_ann_t2.txt A -ascii +A = grid2gmt(del_mn, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Absolute/del_ann_t2.txt A -ascii +A = grid2gmt(ecmwf_mn, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Absolute/ecmwf_ann_t2.txt A -ascii +A = grid2gmt(merra_mn, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Absolute/merra_ann_t2.txt A -ascii +A = grid2gmt(cfsr_mn, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Absolute/cfsr_ann_t2.txt A -ascii + +A = grid2gmt(d_del, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Differences/d_cpc_ann_t2.txt A -ascii +A = grid2gmt(d_ecmwf, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Differences/d_ecmwf_ann_t2.txt A -ascii +A = grid2gmt(d_merra, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Differences/d_merra_ann_t2.txt A -ascii +A = grid2gmt(d_cfsr, 0.5); +save /home/lorenz-c/Dokumente/Projektarbeit/Publications/Reana_comp/Images/Differences/d_cfsr_ann_t2.txt A -ascii + + + + + + + + diff --git a/comp_nrgauges.m b/comp_nrgauges.m new file mode 100644 index 0000000..b6829d5 --- /dev/null +++ b/comp_nrgauges.m @@ -0,0 +1,42 @@ +function [cpc, gpcc, ngauges] = comp_nrgauges + +load /media/storage/Data/Precipitation/CPC/CPC_GAUG.mat +load /media/storage/Data/Precipitation/GPCC/GPCC_NGAUGv4.0.mat + +load continents.asc + +for t = 1:216 + for i = 1:11 + tmp1 = zeros(360, 720); + tmp2 = zeros(360, 720); + + 
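+        % For continent i (numbering assumed to follow continents.asc), the
+        % 0/1 masks tmp1 and tmp2 are multiplied with the CPC and GPCC
+        % gauge-count fields; summing the masked fields then yields the
+        % number of gauges per continent and time step.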
tmp1(continents == i) = 1; + tmp2(continents == i) = 1; + + tmp1 = tmp1.*cpc_gaug{t,9}; + tmp2 = tmp2.*gpcc_ngaug{t+120,9}; + + ngauges{1,1}(t,i) = sum(sum(tmp1)); + ngauges{1,2}(t,i) = sum(sum(tmp2)); + end +end + + +for j = 1:11 + k = 1; + for i = 1:12:216 + cpc(k, j) = mean(ngauges{1}(i:i+11,j)); + gpcc(k, j) = mean(ngauges{2}(i:i+11,j)); + k = k+1; + end +end +% +for i = 1:11 + ngauges{1,1}(:,i) = ngauges{1,1}(:,i)/ngauges{1,1}(1,i); + ngauges{1,2}(:,i) = ngauges{1,2}(:,i)/ngauges{1,2}(1,i); + cpc(:,i) = cpc(:,i)/cpc(1,i); + gpcc(:,i) = gpcc(:,i)/gpcc(1,i); +end + + + \ No newline at end of file diff --git a/comp_spat_corr.m b/comp_spat_corr.m new file mode 100644 index 0000000..7ff7637 --- /dev/null +++ b/comp_spat_corr.m @@ -0,0 +1,130 @@ +function R = comp_spat_corr(fld1, fld2, time, tscale, cswitch, clms, mval) + +% Of a given input dataset, this function computes the long-term mean +% either as the mean of the whole timeseries, a seasonal mean or a monthly +% mean. +% ------------------------------------------------------------------------- +% Input: fld1/2 'cell' The input dataset must be a cell variable +% which contains the global field itself and a +% time stamp +% time [1 x 2] Defines the start- and end-year of the +% time-series which is considered +% tscale 'string' 'annual_1' -> long-term annual mean +% 'annual_2' -> mean of each year +% 'seasonal' -> long-term of the four seasons +% JFM, AMJ, JAS, OND +% 'monthly' -> long-term mean for each +% month +% clms [1 x 3] tells the function which row of the input +% dataset contains month, year and the global +% field, i.e. clms(1) -> month, clms(2) -> year +% and clms(3) -> field +% If clms has only 3 elements, it is assumed +% that both datasets have the same ordering. +% Otherwise, clms is a [1 x 6] vector, of which +% the first 3 elements belong to fld1 and the +% rest to fld2. +% mval [1 x 1] Missing values in the datasets. If mval is +% NaN, it must be given as a string, i.e. mval = +% 'NaN' +% +% Output: R {1 x 1} long-term annual mean global field +% {1 x 4} long-term mean seasonal global fields +% {1 x 12} long-term mean monthly global fields + +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Jaunary 2011 +% ------------------------------------------------------------------------- +% Uses: spat_corr.m, comp_spat_mean.m +% ------------------------------------------------------------------------- + + +if nargin < 7 + mval = -9999; +end + +if nargin < 6 + clms = [4 5 9 4 5 9]; +end + +if nargin < 5 + cswitch = 1; +end + +if nargin < 4 + tscale = 'complete'; +end + +if size(clms,1) == 3 + clms = [clms; clms]; +elseif size(clms,2) == 3 + clms = [clms clms]; +end + +sind1 = find(cell2mat(fld1(:,clms(1))) == 1 & ... + cell2mat(fld1(:,clms(2))) == time(1)); +eind1 = find(cell2mat(fld1(:,clms(1))) == 12 & ... + cell2mat(fld1(:,clms(2))) == time(2)); + +sind2 = find(cell2mat(fld2(:,clms(4))) == 1 & ... + cell2mat(fld2(:,clms(5))) == time(1)); +eind2 = find(cell2mat(fld2(:,clms(4))) == 12 & ... 
+ cell2mat(fld2(:,clms(5))) == time(2)); + +fields(:,1:3) = fld1(sind1:eind1, clms(1:3)); +fields(:,4) = fld2(sind2:eind2, clms(6)); + + +if strcmp(tscale, 'annual_1') + + mn_1 = comp_spat_mean(fields, time, tscale, [1 2 3], mval); + mn_2 = comp_spat_mean(fields, time, tscale, [1 2 4], mval); + + R = spat_corr(mn_1, mn_2, cswitch, mval); + +elseif strcmp(tscale, 'annual_2') + j_indx = find(cell2mat(fields(:,1)) == 1); + + for i = 1:length(j_indx) + for j = 1:12 + tmp(j,:) = spat_corr(fields{j_indx(i)+j-1,3}, ... + fields{j_indx(i)+j-1,4}, cswitch, mval); + end + R(:,i) = mean(tmp,1)'; + end + + +elseif strcmp(tscale, 'complete') + + for i = 1:length(fields) + R(i,:) = spat_corr(fields{i,3}, fields{i,4}, cswitch, mval); + end + +elseif strcmp(tscale, 'monthly') + + mn_1 = comp_spat_mean(fields, time, 'monthly_2', [1 2 3], mval); + mn_2 = comp_spat_mean(fields, time, 'monthly_2', [1 2 4], mval); + + for i = 1:12 + for j = 1:size(mn_1,1) + tmp(j,:) = spat_corr(mn_1{j,i}, mn_2{j,i}, cswitch, mval); + end + R(:,i) = mean(tmp,1)'; + clear tmp; + end +end + + + + + + + + + + + + + + diff --git a/comp_spat_mean.m b/comp_spat_mean.m new file mode 100644 index 0000000..f99941d --- /dev/null +++ b/comp_spat_mean.m @@ -0,0 +1,213 @@ +function otpt = comp_spat_mean(inpt, time, tscale, clms, mval, mmdayflg) + +% Of a given input dataset, this function computes the long-term mean +% either as the mean of the whole timeseries, a seasonal mean or a monthly +% mean. +% ------------------------------------------------------------------------- +% Input: inpt 'cell' The input dataset must be a cell variable +% which contains the global field itself and a +% time stamp +% time [1 x 2] Defines the start- and end-year of the +% time-series which is considered +% tscale 'string' 'annual' -> long-term annual mean +% 'seasonal' -> long-term of the four seasons +% JFM, AMJ, JAS, OND +% 'monthly' -> long-term mean for each month +% clms [1 x 3] tells the function which row of the input +% dataset contains month, year and the global +% field, i.e. clms(1) -> month, clms(2) -> year +% and clms(3) -> field +% +% Output: otpt {1 x 1} long-term annual mean global field +% {1 x 4} long-term mean seasonal global fields +% {1 x 12} long-term mean monthly global fields + +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Jaunary 2011 +% ------------------------------------------------------------------------- +% Uses: +% ------------------------------------------------------------------------- + + + +sind = find(cell2mat(inpt(:,clms(1))) == 1 & ... + cell2mat(inpt(:,clms(2))) == time(1)) +eind = find(cell2mat(inpt(:,clms(1))) == 12 & ... + cell2mat(inpt(:,clms(2))) == time(2)) + + +fields = inpt(sind:eind, clms); +fsize = size(fields{1,3}); + + +if strcmp(mval, 'NaN') + for i = 1:length(fields) + fields{i,3}(isnan(fields{i,3})) = -9999; + end + mval = -9999; +end + +if nargin < 6 + mmdayflg = 0; +end + + +if mmdayflg == 1 + for i = 1:length(fields) + nr_days(i,1) = daysinmonth(fields{i,1}, fields{i,2}); + end +else + nr_days = ones(length(fields), 1); +end + +if strcmp(tscale, 'annual_1') + + otpt = zeros(fsize); + valid_cells = zeros(fsize); + + for i = 1:length(fields) + otpt = otpt + fields{i,3}/nr_days(i); + valid_cells(fields{i,3} ~= mval) = ... + valid_cells(fields{i,3} ~= mval) + 1; + end + + otpt(valid_cells ~= 0) = otpt(valid_cells ~= 0) ... 
+ ./valid_cells(valid_cells ~= 0); + + if strcmp(mval, 'NaN') + otpt(valid_cells == 0) = NaN; + else + otpt(valid_cells == 0) = mval; + end + + +elseif strcmp(tscale, 'annual_2') + j_indx = find(cell2mat(fields(:,1)) == 1); + + for i = 1:length(j_indx) + otpt{i,1} = zeros(fsize); + valid_cells = zeros(fsize); + tmp = zeros(fsize); + + for j = 1:12 + tmp = tmp + fields{j_indx(i)+j-1,3} ... + /nr_days(j_indx(i)+j-1); + valid_cells(fields{j_indx(i)+j-1,3} ~= mval) = ... + valid_cells(fields{j_indx(i)+j-1,3} ~= mval) + 1; + end + + otpt{i,1}(valid_cells ~= 0) = tmp(valid_cells ~= 0) ... + ./valid_cells(valid_cells ~= 0); + + if strcmp(mval, 'NaN') + otpt{i,1}(valid_cells == 0) = NaN; + else + otpt{i,1}(valid_cells == 0) = mval; + end + end + + +elseif strcmp(tscale, 'seasonal_1') + + ssn_indx(:,1) = find(cell2mat(fields(:,1)) == 1); + ssn_indx(:,2) = ssn_indx(:,1) + 3; + ssn_indx(:,3) = ssn_indx(:,2) + 3; + ssn_indx(:,4) = ssn_indx(:,3) + 3; + + for i = 1:4 + valid_cells = zeros(fsize); + otpt{1,i} = zeros(fsize); + tmp = zeros(fsize); + + for j = 1:size(ssn_indx,1) + for k = 1:3 + tmp = tmp + fields{ssn_indx(j+k-1,i), 3} ... + /nr_days(ssn_indx(j+k-1,i)); + valid_cells(fields{ssn_indx(j+k-1,i), 3} ~= mval) ... + = valid_cells(fields{ssn_indx(j+k-1,i), 3} ~= mval) + 1; + end + end + + otpt{1,i}(valid_cells ~= 0) = tmp(valid_cells ~= 0) ... + ./valid_cells(valid_cells ~= 0); + if strcmp(mval, 'NaN') + otpt{1,i}(valid_cells == 0) = NaN; + else + otpt{1,i}(valid_cells == 0) = mval; + end + + end + + +elseif strcmp(tscale, 'seasonal_2') + + ssn_indx(:,1) = find(cell2mat(fields(:,1)) == 1); + ssn_indx(:,2) = ssn_indx(:,1) + 3; + ssn_indx(:,3) = ssn_indx(:,2) + 3; + ssn_indx(:,4) = ssn_indx(:,3) + 3; + + for i = 1:4 + for j = 1:size(ssn_indx,1) + otpt{j,i} = zeros(fsize); + tmp = zeros(fsize); + valid_cells = zeros(fsize); + + for k = 1:3 + tmp = tmp + fields{ssn_indx(j,i)+k-1, 3} ... + /nr_days(ssn_indx(j,i)+k-1); + valid_cells(fields{ssn_indx(j,i)+k-1, 3} ~= mval) ... + = valid_cells(fields{ssn_indx(j,i)+k-1, 3} ~= mval) + 1; + end + + otpt{j,i}(valid_cells ~= 0) = tmp(valid_cells ~= 0) ... + ./valid_cells(valid_cells ~= 0); + if strcmp(mval, 'NaN') + otpt{j,i}(valid_cells == 0) = NaN; + else + otpt{j,i}(valid_cells == 0) = mval; + end + end + end + + + +elseif strcmp(tscale, 'monthly_1') +% j_indx = 1:12:length(fields) % Vector which contains all months + +mnths = cell2mat(fields(:, 1)); +yrs = cell2mat(fields(:, 2)); + for i = 1:12 + indx = find(mnths == i); + + otpt{i,1} = zeros(fsize); + tmp = zeros(fsize); + valid_cells = zeros(fsize); + + for j = 1:length(indx) + tmp = tmp + fields{indx(j), 3}; + valid_cells(fields{indx(j), 3} ~= mval) ... + = valid_cells(fields{indx(j), 3} ~= mval) + 1; + end + tmp(valid_cells ~= 0) = tmp(valid_cells ~= 0) ... 
+ ./valid_cells(valid_cells ~= 0); + otpt{i,1} = tmp; + otpt{i,1}(valid_cells == 0) = mval; + + + end + +elseif strcmp(tscale, 'monthly_2') + m_indx(:,1) = find(cell2mat(fields(:,1)) == 1); + + for i = 1:11 + m_indx(:,i+1) = m_indx(:,i) + 1; + end + + for i = 1:size(m_indx,1) + for j = 1:12 + otpt{i,j} = fields{m_indx(i,j), 3}; + end + end +end + diff --git a/comp_tspat_corr.m b/comp_tspat_corr.m new file mode 100644 index 0000000..8060a2b --- /dev/null +++ b/comp_tspat_corr.m @@ -0,0 +1,83 @@ +function R = comp_tspat_corr(fld1, fld2, mval) + + + n = length(fld1) + + [r1, c1] = size(fld1{1}); + [r2, c2] = size(fld2{1}); + + +if r1 ~= r2 | c1 ~= c2 + error('Input fields must have the same the same length') +end + + + + + + +% R = NaN(r1); + +mn_1 = zeros(r1, c1); +mn_2 = zeros(r1, c1); +sig_1 = zeros(r1, c1); +sig_2 = zeros(r1, c1); +nom = zeros(r1, c1); + +for k = 1:n + mn_1 = mn_1 + fld1{k}; + mn_2 = mn_2 + fld2{k}; +end + +mn_1 = mn_1/n; +mn_2 = mn_2/n; +% keyboard +for k = 1:n + sig_1 = sig_1 + (fld1{k} - mn_1).^2; + sig_2 = sig_2 + (fld2{k} - mn_2).^2; + + nom = nom + (fld1{k} - mn_1).*(fld2{k} - mn_2); +end + +sig_1 = sqrt(sig_1); +sig_2 = sqrt(sig_2); + +% nom = nom; +% keyboard +R = nom./(sig_1.*sig_2); + + + + + +% tmp_1 = shiftdim(fld1_new(i,j,:)); +% tmp_2 = shiftdim(fld2_new(i,j,:)); + +% mn_1 = mean(tmp_1); +% mn_2 = mean(tmp_2); +% +% sig_1 = std(tmp_1); +% sig_2 = std(tmp_2); + +% if abs(sig_1) < 1e-5 +% sig_1 = 1e-10; +% end +% +% if abs(sig_2) < 1e-5 +% sig_2 = 1e-10; +% end +% +% if abs(mn_1) < 1e-5 +% mn_1 = 1e-10; +% end +% +% if abs(mn_2) < 1e-5 +% % mn_2 = 1e-10; +% % end +% +% R(i,j) = 1/n*((tmp_1 - mn_1)'*(tmp_2 - mn_2))/(sig_1*sig_2); +% +% end +% end +% + diff --git a/comp_waterbalance.m b/comp_waterbalance.m new file mode 100644 index 0000000..a57286c --- /dev/null +++ b/comp_waterbalance.m @@ -0,0 +1,183 @@ +function [wb1 wb2] = comp_waterbalance(set) + +load continents.asc + +A = area_wghts(0.25:0.5:179.75, 'mat'); + +mask_l = gen_mask(1); +mask_o = gen_mask(2); + +A_lnd = sum(sum(mask_l.*A)); +A_ocn = sum(sum(mask_o.*A)); + + +if strcmp(set, 'cfsr') + load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat + P = spataggmn(cfsr_prec, mask_l, [1, 0], 'clms', [4 5 9]); + tmp = spataggmn(cfsr_prec, ones(360, 720), 1, 'clms', [4 5 9]); + P = [P tmp(:,end)]; + clear cfsr* + + load /media/storage/Data/Evaporation/CFSR/CFSR_ET.mat + ET = spataggmn(cfsr_et, mask_l, [1 0], 'clms', [4 5 9]); + tmp = spataggmn(cfsr_et, ones(360, 720), 1, 'clms', [4 5 9]); + ET = [ET tmp(:,end)]; + clear cfsr* + + load /media/storage/Data/Runoff/CFSR/CFSR_R.mat + R = spataggmn(cfsr_r, mask_l, 1, 'clms', [4 5 9]); + clear cfsr* + + load /media/storage/Data/Mflux/CFSR/CFSR_VIMFD.mat + DQ = spataggmn(cfsr_vimfd, mask_l, [1 0], 'clms', [4 5 9]); + tmp = spataggmn(cfsr_vimfd, ones(360, 720), 1, 'clms', [4 5 9]); + DQ = [DQ tmp(:,end)]; + clear cfsr* + + load /media/storage/Data/Total_water_atm/CFSR/CFSR_TQV.mat + DW = spataggmn(cfsr_tqv, mask_l, [1 0], 'clms', [4 5 9]); + tmp = spataggmn(cfsr_tqv, ones(360, 720), 1, 'clms', [4 5 9]); + DW = [DW tmp(:,end)]; + + DWDT = cdiffts(DW(2:end,:), [1 1989 12 2006], 'clms', [1 2 4]); + clear cfsr_tqv DW + keyboard + + +elseif strcmp(set, 'merra') + load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat + P = spataggmn(merra_prec, mask_l, [1, 0], 'clms', [4 5 9]); + tmp = spataggmn(merra_prec, ones(360, 720), 1, 'clms', [4 5 9]); + P = [P tmp(:,end)]; + clear cfsr* + + load /media/storage/Data/Evaporation/MERRA/MERRA_ET.mat + ET = 
spataggmn(merra_et, mask_l, [1 0], 'clms', [4 5 9]); + tmp = spataggmn(merra_et, ones(360, 720), 1, 'clms', [4 5 9]); + ET = [ET tmp(:,end)]; + clear cfsr* + + load /media/storage/Data/Runoff/MERRA/MERRA_R.mat + R = spataggmn(merra_r, mask_l, 1, 'clms', [4 5 9]); + clear cfsr* + + load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat + DQ = spataggmn(merra_vimfd, mask_l, [1 0], 'clms', [4 5 9]); + tmp = spataggmn(merra_vimfd, ones(360, 720), 1, 'clms', [4 5 9]); + DQ = [DQ tmp(:,end)]; + clear cfsr* + + load /media/storage/Data/Total_water_atm/MERRA/MERRA_TQV.mat + DW = spataggmn(merra_tqv, mask_l, [1 0], 'clms', [4 5 9]); + tmp = spataggmn(merra_tqv, ones(360, 720), 1, 'clms', [4 5 9]); + DW = [DW tmp(:,end)]; + + DWDT = cdiffts(DW(2:end,:), [1 1989 12 2006], 'clms', [1 2 4]); + clear cfsr_tqv DW + keyboard + +elseif strcmp(set, 'ecmwf') + load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat + P_l = comp_glob_quant(ecmwf_prec, [1989 2006], 1, 'annual', [4 5 9], -9999, 0); + P_o = comp_glob_quant(ecmwf_prec, [1989 2006], 2, 'annual', [4 5 9], -9999, 0); + P_c = comp_glob_quant(ecmwf_prec, [1989 2006], 0, 'annual', [4 5 9], -9999, 0); + clear ecmwf_prec + + load /media/storage/Data/Evaporation/ECMWF/ECMWF_ET.mat + ET_l = comp_glob_quant(ecmwf_et, [1989 2006], 1, 'annual', [4 5 9], -9999, 0); + ET_o = comp_glob_quant(ecmwf_et, [1989 2006], 2, 'annual', [4 5 9], -9999, 0); + ET_c = comp_glob_quant(ecmwf_et, [1989 2006], 0, 'annual', [4 5 9], -9999, 0); + clear ecmwf_et + + load /media/storage/Data/Runoff/ECMWF/ECMWF_R.mat + R = comp_glob_quant(ecmwf_r, [1989 2006], 1, 'annual', [4 5 9], 'NaN', 0); + clear ecmwf_r + + load /media/storage/Data/Mflux/ECMWF/ECMWF_VIMFD.mat + DQ_l = comp_glob_quant(ecmwf_vimfd, [1989 2006], 1, 'annual', [4 5 9], -9999, 0); + DQ_o = comp_glob_quant(ecmwf_vimfd, [1989 2006], 2, 'annual', [4 5 9], -9999, 0); + DQ_c = comp_glob_quant(ecmwf_vimfd, [1989 2006], 0, 'annual', [4 5 9], -9999, 0); + clear ecmwf_vimfd + + load /media/storage/Data/Total_water_atm/ECMWF/ECMWF_TQV.mat + dwdt = cdiffcell(ecmwf_tqv, 'time', [1979 2009], 'clms', [4 5 9]); + DW_l = comp_glob_quant(dwdt, [1989 2006], 1, 'annual', [1 2 3], -9999, 0); + DW_o = comp_glob_quant(dwdt, [1989 2006], 2, 'annual', [1 2 3], -9999, 0); + DW_c = comp_glob_quant(dwdt, [1989 2006], 0, 'annual', [1 2 3], -9999, 0); + clear ecmwf_tqv dwdt + + +end + +% keyboard + +% Absolute quantities +wb2(1) = mean(P_l(:,2))*12*A_lnd; % Continental prec. +wb2(2) = mean(ET_l(:,2))*12*A_lnd; % Continental evap. +wb2(3) = mean(R(:,2))*12*A_lnd; % Runoff +wb2(4) = mean(DQ_l(:,2))*12*A_lnd; % Continental moist. flx. +wb2(5) = mean(DW_l(:,2))*12*A_lnd; % Continental wat. vap. + +wb2(6) = mean(P_o(:,2))*12*A_ocn; % Oceanic pre. +wb2(7) = mean(ET_o(:,2))*12*A_ocn; % Oceanic evap. +wb2(8) = mean(DQ_o(:,2))*12*A_ocn; % Oceanic moist. flx. +wb2(9) = mean(DW_o(:,2))*12*A_ocn; % Oceanic wat. vap. + +wb2(10) = mean(P_c(:,2))*12*(A_ocn+A_lnd); % Global prec. +wb2(11) = mean(ET_c(:,2))*12*(A_ocn+A_lnd); % Global evap. +wb2(12) = mean(DQ_c(:,2))*12*(A_ocn+A_lnd); % Global moist. flx. +wb2(13) = mean(DW_c(:,2))*12*(A_ocn+A_lnd); % Global wat. vap. + +% Budget terms +wb2(14) = wb2(1) - wb2(2); % Continental P - E +wb2(15) = wb2(6) - wb2(7); % Oceanic P - E +wb2(16) = wb2(10) - wb2(11); % Global P - E + + + +% Absolute quantities +% wb1(:,1) = P_l(:,2)*A_lnd; % Continental prec. +% wb1(:,2) = ET_l(:,2)*A_lnd; % Continental evap. +% wb1(:,3) = R(:,2)*A_lnd; % Runoff +% wb1(:,4) = -DQ_l(:,2)*A_lnd; % Continental moist. flx. 
+% wb1(:,5) = DW_l(:,2)*A_lnd; % Continental wat. vap. +% +% wb1(:,6) = P_o(:,2)*A_ocn; % Oceanic pre. +% wb1(:,7) = ET_o(:,2)*A_ocn; % Oceanic evap. +% wb1(:,8) = -DQ_o(:,2)*A_ocn; % Oceanic moist. flx. +% wb1(:,9) = DW_o(:,2)*A_ocn; % Oceanic wat. vap. +% +% wb1(:,10) = P_c(:,2)*(A_lnd+A_ocn); % Global prec. +% wb1(:,11) = ET_c(:,2)*(A_lnd+A_ocn); % Global evap. +% wb1(:,12) = -DQ_c(:,2)*(A_lnd+A_ocn); % Global moist. flx. +% wb1(:,13) = DW_c(:,2)*(A_lnd+A_ocn); % Global wat. vap. +% +% % Budget terms +% wb1(:,14) = wb1(:,1) - wb1(:,2); % Continental P - E +% wb1(:,15) = wb1(:,6) - wb1(:,7); % Oceanic P - E +% wb1(:,16) = wb1(:,10) - wb1(:,11); % Global P - E + + + + +wb1(:,1) = P_l(:,2)*12*A_lnd; % Continental prec. +wb1(:,2) = ET_l(:,2)*12*A_lnd; % Continental evap. +wb1(:,3) = R(:,2)*12*A_lnd; % Runoff +wb1(:,4) = -DQ_l(:,2)*12*A_lnd; % Continental moist. flx. +wb1(:,5) = DW_l(:,2)*12*A_lnd; % Continental wat. vap. + +wb1(:,6) = P_o(:,2)*12*A_ocn; % Oceanic pre. +wb1(:,7) = ET_o(:,2)*12*A_ocn; % Oceanic evap. +wb1(:,8) = -DQ_o(:,2)*12*A_ocn; % Oceanic moist. flx. +wb1(:,9) = DW_o(:,2)*12*A_ocn; % Oceanic wat. vap. + +wb1(:,10) = P_c(:,2)*12*(A_ocn+A_lnd); % Global prec. +wb1(:,11) = ET_c(:,2)*12*(A_ocn+A_lnd); % Global evap. +wb1(:,12) = -DQ_c(:,2)*12*(A_ocn+A_lnd); % Global moist. flx. +wb1(:,13) = DW_c(:,2)*12*(A_ocn+A_lnd); % Global wat. vap. + +% Budget terms +wb1(:,14) = wb1(:,1) - wb1(:,2); % Continental P - E +wb1(:,15) = wb1(:,6) - wb1(:,7); % Oceanic P - E +wb1(:,16) = wb1(:,10) - wb1(:,11); % Global P - E + diff --git a/comp_zon_ann_prec.m b/comp_zon_ann_prec.m new file mode 100644 index 0000000..6e49791 --- /dev/null +++ b/comp_zon_ann_prec.m @@ -0,0 +1,101 @@ +function [zonal_prec] = comp_zon_prec(inpt, clms, time, cswitch, tscale) + + +if cswitch == 1 + load indexfile3.asc + mask = indexfile3; + mask(mask>0) = 1; + mask(mask == -9999) = 0; +end + + +yrs = time(1):1:time(2); + +sind = find(cell2mat(inpt(:,4)) == 1 & cell2mat(inpt(:,5)) == time(1)); +eind = find(cell2mat(inpt(:,4)) == 12 & cell2mat(inpt(:,5)) == time(2)); + +if strcmp(tscale, 'yr') + jan_ind = sind:12:eind; + + theta = 0.25:0.5:179.75; + A = area_wghts(theta, 0.5); + A = A'; + + for i = 1:length(jan_ind) + + mask = indexfile3; + mask(mask>0) = 1; + mask(mask == -9999) = 0; + + ann_mean = zeros(360, 720); + + for j = 1:12 + + tmp = inpt{jan_ind(i) + j - 1, 9}; + mask(tmp == -9999) = 0; + tmp = tmp.*mask; + + ann_mean = ann_mean + tmp; + + end + + valid_cells = sum(mask,2); + tot_lat_area = A.*valid_cells; + total_area = sum(tot_lat_area); + lat_weights = tot_lat_area./total_area; + + zonal_prec_tmp(:,i) = sum(ann_mean,2).*lat_weights; +% keyboard + plot(zonal_prec_tmp(:,i)); + hold on + end + zonal_prec = mean(zonal_prec_tmp,2); + +elseif strcmp(tscale, 'mnth') + + tmp_fields = inpt(sind:eind,:); + + for i = 1:12 + + + mnth_ind = find(cell2mat(tmp_fields(:, 4)) == i); + + mnth_mean = zeros(360, 720); + + mask = indexfile3; + mask(mask>0) = 1; + mask(mask == -9999) = 0; + + for j = 1:length(mnth_ind) + tmp = tmp_fields{mnth_ind(j), 9}; + mask(tmp == -9999) = 0; + tmp = tmp.*mask; + + mnth_mean = mnth_mean + tmp; + end + + valid_cells = sum(mask,2); + tot_lat_area = A.*valid_cells; + total_area = sum(tot_lat_area); + lat_weights = tot_lat_area./total_area; + + zonal_prec(:,i) = sum(mnth_mean,2).*lat_weights; + end +end + + + + + + + + + + + + + + + + + diff --git a/comp_zon_prec.m b/comp_zon_prec.m new file mode 100644 index 0000000..33a7787 --- /dev/null +++ b/comp_zon_prec.m @@ -0,0 +1,160 @@ +function 
[zonal_prec] = comp_zon_prec(inpt, time, cswitch, tscale) + +clms = [4 5 9]; + +if nargin < 4 + tscale = 'mnth'; +end + +if cswitch == 1 | cswitch == 2 + load continents.asc +end + + +yrs = time(1):1:time(2); + +sind = find(cell2mat(inpt(:,4)) == 1 & cell2mat(inpt(:,5)) == time(1)); +eind = find(cell2mat(inpt(:,4)) == 12 & cell2mat(inpt(:,5)) == time(2)); + +if strcmp(tscale, 'yr') + + jan_ind = sind:12:eind; + + for i = 1:length(jan_ind) + + mask = zeros(360, 720); + + if cswitch == 0 + mask = mask + 1; + elseif cswitch == 1 + mask(continents > -9999) = 1; + elseif cswitch == 2 + mask(continents == -9999) = 1; + end + + + yr_sum = zeros(360, 720); + + for j = 1:12 + + tmp = inpt{jan_ind(i) + j - 1, 9}; + mask(tmp == -9999) = 0; + tmp = tmp.*mask; + nr_days = daysinmonth(j, inpt{jan_ind(i) + j - 1, 5}); + yr_sum = yr_sum + tmp/nr_days; + + end + + valid_cells = sum(mask,2); + zonal_prec_tmp(:,i) = sum(yr_sum,2)./(valid_cells*12); + + end + + zonal_prec = mean(zonal_prec_tmp,2); + zonal_prec(valid_cells == 0) = 0; + +elseif strcmp(tscale, 'mnth') + zonal_prec = zeros(360, 12); + + tmp_fields = inpt(sind:eind,:); + + for i = 1:12 + + % Searching through the data to find the indices of all identical + % months + mnth_ind = find(cell2mat(tmp_fields(:, 4)) == i); + + % Creating a matrix with zeros, in which the sums of the monthly + % values will be stored + mnth_sum = zeros(360, 720); + + % If only continental values are desired, the mask filters out the + % values over the oceans + mask = zeros(360, 720); + + if cswitch == 0 + mask = mask + 1; + elseif cswitch == 1 + mask(continents > -9999) = 1; + elseif cswitch == 2 + mask(continents == -9999) = 1; + end + + for j = 1:length(mnth_ind) + tmp = tmp_fields{mnth_ind(j), 9}; + mask(tmp == -9999) = 0; + tmp = tmp.*mask; + + % Computing the days of the considered month for the conversion + % of [mm/month] to [mm/day] + nr_days = daysinmonth(i, tmp_fields{mnth_ind(j), 5}); + + mnth_sum = mnth_sum + tmp/nr_days; + end + + % Computing the number of cells with valid values + % -> It is not possible to simply take the mean of every row as the + % number of valid cells changes with latitude. Thus, we have to + % divide the sum of the single elements by the number of valid + % cells of a specific row to obtain the mean! + valid_cells = sum(mask,2); + + zonal_prec(:,i) = sum(mnth_sum,2)./(valid_cells*length(mnth_ind)); + zonal_prec(valid_cells == 0, i) = 0; + + end + +elseif strcmp(tscale, 'season') + mnths = [12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; + + mask = zeros(360, 720); + + if cswitch == 0 + mask = mask + 1; + elseif cswitch == 1 + mask(continents > -9999) = 1; + elseif cswitch == 2 + mask(continents == -9999) = 1; + end + + for i = 1:12 + indices(:,i) = find(cell2mat(inpt(:,4)) == mnths(i)); + end + + zonal_prec = zeros(360,4); + + for i = 1:4 + snl_mn = zeros(360, 720); + + for j = 1:size(indices,1) + for k = 1:3 + nr_days = daysinmonth(fields{indices(j,(i-1)*3+k),1}, ... 
+ fields{indices(j,(i-1)*3+k),2}); + snl_mn = snl_mn + inpt{indices(j,(i-1)*3+k),3}/nr_days; + mask(inpt{indices(j,(i-1)*3+k),3} == mval) = 0; + end + end + valid_cells = sum(mask,2); + zonal_prec_tmp = snl_mn/(3*size(indices,1)); + + end + + + +end + + + + + + + + + + + + + + + + diff --git a/comp_zon_quant.m b/comp_zon_quant.m new file mode 100644 index 0000000..e6329b7 --- /dev/null +++ b/comp_zon_quant.m @@ -0,0 +1,238 @@ +function zon_val = comp_zon_quant(inpt, time, cswitch, tscale, clms, mval) + +if nargin < 6 + mval = -9999; +end + +if nargin < 5 + clms = [4 5 9]; +end + +if nargin < 4 + tscale = 'annual'; +end + +if nargin < 3 + cswitch = 0; +end + +% If we want to compute only continental or oceanic values, we need a +% land/sea-mask! +if cswitch == 1 | cswitch == 2 | cswitch == 3 + load continents.asc +end + +sind = find(cell2mat(inpt(:,clms(1))) == 1 & ... + cell2mat(inpt(:,clms(2))) == time(1)); +eind = find(cell2mat(inpt(:,clms(1))) == 12 & ... + cell2mat(inpt(:,clms(2))) == time(2)); + + +% if strcmp(tscale, 'season') +% % If our input dataset starts in January of the start year, we need to +% % switch to march for seasonal values +% if sind == 1 +% sind = sind + 3; +% % Otherwise, we can also use the data from December of the last year +% else +% sind = sind - 1; +% end +% % If our input dataset ends in December of the end year, we need to +% % switch to November for seasonal values +% if eind == length(inpt) +% eind = eind - 1; +% % Otherwise, we can also use the data from December of the following year +% else +% eind = eind + 11; +% end +% end + +if strcmp(tscale, 'seasonal') + sind = sind - 1; + eind = eind - 1; +end + + +fields = inpt(sind:eind, [clms(1) clms(2) clms(3)]); + +clear inpt + + +if strcmp(tscale, 'annual') + + % The indices of all Januaries of the dataset + jan_ind = 1:12:length(fields); + + zon_val = zeros(360, 1); + + valid_years = zeros(360, 1); + % Now, the mean of each year is computed seperately + for i = 1:length(jan_ind) + + % Defining a mask for global, continental or oceanic values + mask = zeros(360, 720); + + if cswitch == 0 + mask = mask + 1; % All values + elseif cswitch == 1 + mask(continents ~= mval) = 1; % Continental values + elseif cswitch == 2 + mask(continents == mval) = 1; % Oceanic values + elseif cswitch == 3 + mask(continents ~= mval) = 1; % Without polar regions + mask(continents == 4) = 0; + end + + fld_sum = zeros(360, 720); + + + for j = 1:12 + % Storing the actual field in the tmp variable + tmp = fields{jan_ind(i) + j - 1, 3}; + % Setting the elements of the mask to zero, where missing + % values in the input dataset are present + mask(tmp == mval) = 0; + % Computing a global/continental/oceanic field without missing + % values + tmp = tmp.*mask; + % For the conversion from mm/month to mm/day, we need to know + % the number of days of the specific month + nr_days = daysinmonth(j, fields{jan_ind(i) + j - 1, 2}); + % Adding the actual month to the yearly sum + fld_sum = fld_sum + tmp/nr_days; + end + + % Computing the number of cells with valid values + % -> It is not possible to simply take the mean of every row as the + % number of valid cells changes with latitude. Thus, we have to + % divide the sum of the single elements by the number of valid + % cells of a specific row to obtain the mean! + % Furthermore, this step removes these latitudes, where not all + % months of a specific year contain valid values to aviod an + % estimate which is shifted towards a specific period of a year. 
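+        % Illustrative example (not part of the original code): if a latitude
+        % row contains 500 valid cells out of 720, its zonal mean becomes
+        % sum(row)/500 instead of sum(row)/720, and the factor 12 below
+        % averages the result over the twelve monthly fields of one year.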
+ valid_cells = sum(mask,2); + valid_cells(valid_cells < 10) = 0; + valid_years(valid_cells ~= 0) = valid_years(valid_cells ~= 0) + 1; + % Now we add the zonal values of the actual year + zon_val = zon_val + sum(fld_sum,2)./(valid_cells*12); + end + + zon_val = zon_val./valid_years; + zon_val(valid_years == 0) = 0; + zon_val(valid_cells < 15) = 0; +elseif strcmp(tscale, 'monthly') + + zon_val = zeros(360, 12); + + for i = 1:12 + % Look through the dataset to find all Januaries, Februaries, ... + mnth_ind = find(cell2mat(fields(:, 1)) == i); + + % Defining a mask for global, continental or oceanic values + mask = zeros(360, 720); + + if cswitch == 0 + mask = mask + 1; % All values + elseif cswitch == 1 + mask(continents ~= mval) = 1; % Continental values + elseif cswitch == 2 + mask(continents == mval) = 1; % Oceanic values + elseif cswitch == 3 + mask(continents ~= mval) = 1; % Without polar regions + mask(continents == 4) = 0; + end + + fld_sum = zeros(360, 720); + + for j = 1:length(mnth_ind) + % Storing the actual field in the tmp variable + tmp = fields{mnth_ind(j), 3}; + % Setting the elements of the mask to zero, where missing + % values in the input dataset are present + mask(tmp == mval) = 0; + % Computing a global/continental/oceanic field without missing + % values + tmp = tmp.*mask; + % For the conversion from mm/month to mm/day, we need to know + % the number of days of the specific month + nr_days = daysinmonth(j, fields{mnth_ind(j), 2}); + % Now we add the zonal values of the actual year + fld_sum = fld_sum + tmp/nr_days; + end + + valid_cells = sum(mask,2); + zon_val(:,i) = sum(fld_sum,2)./(valid_cells*length(mnth_ind)); + zon_val(valid_cells == 0, i) = NaN; + zon_val(valid_cells < 15, i) = NaN; + end + +elseif strcmp(tscale, 'seasonal') + % We want to compute the mean values for DJF, MAM, JJA and SON + mnths = [12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; + + % Defining a mask for global, continental or oceanic values + mask = zeros(360, 720); + + if cswitch == 0 + mask = mask + 1; % All values + elseif cswitch == 1 + mask(continents ~= mval) = 1; % Continental values + elseif cswitch == 2 + mask(continents == mval) = 1; % Oceanic values + elseif cswitch == 3 + mask(continents ~= mval) = 1; % Without polar regions + mask(continents == 4) = 0; + end + + fld_sum = zeros(360, 720); + + % First, we need + for i = 1:12 + indices(:,i) = find(cell2mat(fields(:,1)) == mnths(i)); + end + + for i = 1:4 + fld_sum = zeros(360, 720); + + for j = 1:size(indices,1) + + for k = 1:3 + tmp = fields{indices(j,(i-1)*3+k),3}; + mask(tmp == mval) = 0; + tmp = tmp.*mask; + nr_days = daysinmonth(fields{indices(j,(i-1)*3+k),1}, ... 
+ fields{indices(j,(i-1)*3+k),2}); + fld_sum = fld_sum + tmp/nr_days; + mask(tmp == mval) = 0; + end + end + + valid_cells = sum(mask,2); + zon_val(:,i) = sum(fld_sum,2)./(valid_cells*j*3); + zon_val(valid_cells == 0, i) = 0; + zon_val(valid_cells < 15, i) = 0; + end +end + + + + + + + + + + + + + + + + + + + + + + + diff --git a/compare_sp.m b/compare_sp.m new file mode 100644 index 0000000..659b5bc --- /dev/null +++ b/compare_sp.m @@ -0,0 +1,259 @@ +clear all + +flder = '/media/storage/Data/Surface Pressure/OPANL/original/'; +flenm = 'ECMWF_opanl_ps_2002.nc'; +PS_opanl = nj_varget([flder, flenm], 'var134'); + +flder = '/media/storage/Data/Surface Pressure/20th/original/'; +flenm = 'pres.sfc.2002.nc'; +PS_twenth = nj_varget([flder, flenm], 'pres'); + +flder = '/media/storage/Data/Surface Pressure/INTERIM/original/'; +flenm = 'ECMWF_interim_ps_2002.nc'; +PS_interim = nj_varget([flder, flenm], 'var134'); + +cd /home/lorenz-c/Dokumente/GRACE/SHBundle + +l = standing(0:60); +tf = isotf(l, 'sp', 0, 0, 'grace'); +tf = tf*ones(1, 121); + +for i = 1:365 + sc_opanl{i,1} = cs2sc(gsha(shiftdim(PS_opanl(i,:,:)), 'ls', 'pole', 60)); + sc_opanl{i,2} = sc_opanl{i,1}./tf; + sc_opanl{i,3} = degvar(sc_opanl{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_twenth{i,1} = cs2sc(gsha(shiftdim(PS_twenth(i,:,:)), 'ls', 'pole', 60)); + sc_twenth{i,2} = sc_twenth{i,1}./tf; + sc_twenth{i,3} = degvar(sc_twenth{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_interim{i,1} = cs2sc(gsha(shiftdim(PS_interim(i,:,:)), 'ls', 'pole', 60)); + sc_interim{i,2} = sc_interim{i,1}./tf; + sc_interim{i,3} = degvar(sc_interim{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); +end + + +cd /media/storage/Analysis + +save sc_twenth2002.mat sc_twenth +save sc_opanl2002.mat sc_opanl +save sc_interim2002.mat sc_interim + +clear all + + +flder = '/media/storage/Data/Surface Pressure/OPANL/original/'; +flenm = 'ECMWF_opanl_ps_2003.nc'; +PS_opanl = nj_varget([flder, flenm], 'var134'); + +flder = '/media/storage/Data/Surface Pressure/20th/original/'; +flenm = 'pres.sfc.2003.nc'; +PS_twenth = nj_varget([flder, flenm], 'pres'); + +flder = '/media/storage/Data/Surface Pressure/INTERIM/original/'; +flenm = 'ECMWF_interim_ps_2003.nc'; +PS_interim = nj_varget([flder, flenm], 'var134'); + +cd /home/lorenz-c/Dokumente/GRACE/SHBundle + +l = standing(0:60); +tf = isotf(l, 'sp', 0, 0, 'grace'); +tf = tf*ones(1, 121); + +for i = 1:365 + sc_opanl{i,1} = cs2sc(gsha(shiftdim(PS_opanl(i,:,:)), 'ls', 'pole', 60)); + sc_opanl{i,2} = sc_opanl{i,1}./tf; + sc_opanl{i,3} = degvar(sc_opanl{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_twenth{i,1} = cs2sc(gsha(shiftdim(PS_twenth(i,:,:)), 'ls', 'pole', 60)); + sc_twenth{i,2} = sc_twenth{i,1}./tf; + sc_twenth{i,3} = degvar(sc_twenth{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_interim{i,1} = cs2sc(gsha(shiftdim(PS_interim(i,:,:)), 'ls', 'pole', 60)); + sc_interim{i,2} = sc_interim{i,1}./tf; + sc_interim{i,3} = degvar(sc_interim{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); +end + + +cd /media/storage/Analysis + +save sc_twenth2003.mat sc_twenth +save sc_opanl2003.mat sc_opanl +save sc_interim2003.mat sc_interim + +clear all + + +flder = '/media/storage/Data/Surface Pressure/OPANL/original/'; +flenm = 'ECMWF_opanl_ps_2004.nc'; +PS_opanl = nj_varget([flder, flenm], 'var134'); + +flder = '/media/storage/Data/Surface Pressure/20th/original/'; +flenm = 'pres.sfc.2004.nc'; +PS_twenth = nj_varget([flder, flenm], 'pres'); + +flder = '/media/storage/Data/Surface Pressure/INTERIM/original/'; +flenm = 'ECMWF_interim_ps_2004.nc'; +PS_interim = 
nj_varget([flder, flenm], 'var134'); + +cd /home/lorenz-c/Dokumente/GRACE/SHBundle + +l = standing(0:60); +tf = isotf(l, 'sp', 0, 0, 'grace'); +tf = tf*ones(1, 121); + +for i = 1:366 + sc_opanl{i,1} = cs2sc(gsha(shiftdim(PS_opanl(i,:,:)), 'ls', 'pole', 60)); + sc_opanl{i,2} = sc_opanl{i,1}./tf; + sc_opanl{i,3} = degvar(sc_opanl{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_twenth{i,1} = cs2sc(gsha(shiftdim(PS_twenth(i,:,:)), 'ls', 'pole', 60)); + sc_twenth{i,2} = sc_twenth{i,1}./tf; + sc_twenth{i,3} = degvar(sc_twenth{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_interim{i,1} = cs2sc(gsha(shiftdim(PS_interim(i,:,:)), 'ls', 'pole', 60)); + sc_interim{i,2} = sc_interim{i,1}./tf; + sc_interim{i,3} = degvar(sc_interim{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); +end + +cd /media/storage/Analysis + +save sc_twenth2004.mat sc_twenth +save sc_opanl2004.mat sc_opanl +save sc_interim2004.mat sc_interim + +clear all + + + + +flder = '/media/storage/Data/Surface Pressure/OPANL/original/'; +flenm = 'ECMWF_opanl_ps_2005.nc'; +PS_opanl = nj_varget([flder, flenm], 'var134'); + +flder = '/media/storage/Data/Surface Pressure/20th/original/'; +flenm = 'pres.sfc.2005.nc'; +PS_twenth = nj_varget([flder, flenm], 'pres'); + +flder = '/media/storage/Data/Surface Pressure/INTERIM/original/'; +flenm = 'ECMWF_interim_ps_2005.nc'; +PS_interim = nj_varget([flder, flenm], 'var134'); + +cd /home/lorenz-c/Dokumente/GRACE/SHBundle + +l = standing(0:60); +tf = isotf(l, 'sp', 0, 0, 'grace'); +tf = tf*ones(1, 121); + +for i = 1:365 + sc_opanl{i,1} = cs2sc(gsha(shiftdim(PS_opanl(i,:,:)), 'ls', 'pole', 60)); + sc_opanl{i,2} = sc_opanl{i,1}./tf; + sc_opanl{i,3} = degvar(sc_opanl{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_twenth{i,1} = cs2sc(gsha(shiftdim(PS_twenth(i,:,:)), 'ls', 'pole', 60)); + sc_twenth{i,2} = sc_twenth{i,1}./tf; + sc_twenth{i,3} = degvar(sc_twenth{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_interim{i,1} = cs2sc(gsha(shiftdim(PS_interim(i,:,:)), 'ls', 'pole', 60)); + sc_interim{i,2} = sc_interim{i,1}./tf; + sc_interim{i,3} = degvar(sc_interim{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); +end + +cd /media/storage/Analysis + +save sc_twenth2005.mat sc_twenth +save sc_opanl2005.mat sc_opanl +save sc_interim2005.mat sc_interim + +clear all + + + + +flder = '/media/storage/Data/Surface Pressure/OPANL/original/'; +flenm = 'ECMWF_opanl_ps_2006.nc'; +PS_opanl = nj_varget([flder, flenm], 'var134'); + +flder = '/media/storage/Data/Surface Pressure/20th/original/'; +flenm = 'pres.sfc.2006.nc'; +PS_twenth = nj_varget([flder, flenm], 'pres'); + +flder = '/media/storage/Data/Surface Pressure/INTERIM/original/'; +flenm = 'ECMWF_interim_ps_2006.nc'; +PS_interim = nj_varget([flder, flenm], 'var134'); + +cd /home/lorenz-c/Dokumente/GRACE/SHBundle + +l = standing(0:60); +tf = isotf(l, 'sp', 0, 0, 'grace'); +tf = tf*ones(1, 121); + +for i = 1:365 + sc_opanl{i,1} = cs2sc(gsha(shiftdim(PS_opanl(i,:,:)), 'ls', 'pole', 60)); + sc_opanl{i,2} = sc_opanl{i,1}./tf; + sc_opanl{i,3} = degvar(sc_opanl{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_twenth{i,1} = cs2sc(gsha(shiftdim(PS_twenth(i,:,:)), 'ls', 'pole', 60)); + sc_twenth{i,2} = sc_twenth{i,1}./tf; + sc_twenth{i,3} = degvar(sc_twenth{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_interim{i,1} = cs2sc(gsha(shiftdim(PS_interim(i,:,:)), 'ls', 'pole', 60)); + sc_interim{i,2} = sc_interim{i,1}./tf; + sc_interim{i,3} = degvar(sc_interim{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); +end + +cd /media/storage/Analysis + +save sc_twenth2006.mat sc_twenth +save sc_opanl2006.mat sc_opanl +save sc_interim2006.mat 
sc_interim + +clear all + + + + +flder = '/media/storage/Data/Surface Pressure/OPANL/original/'; +flenm = 'ECMWF_opanl_ps_2007.nc'; +PS_opanl = nj_varget([flder, flenm], 'var134'); + +flder = '/media/storage/Data/Surface Pressure/20th/original/'; +flenm = 'pres.sfc.2007.nc'; +PS_twenth = nj_varget([flder, flenm], 'pres'); + +flder = '/media/storage/Data/Surface Pressure/INTERIM/original/'; +flenm = 'ECMWF_interim_ps_2007.nc'; +PS_interim = nj_varget([flder, flenm], 'var134'); + +cd /home/lorenz-c/Dokumente/GRACE/SHBundle + +l = standing(0:60); +tf = isotf(l, 'sp', 0, 0, 'grace'); +tf = tf*ones(1, 121); + +for i = 1:365 + sc_opanl{i,1} = cs2sc(gsha(shiftdim(PS_opanl(i,:,:)), 'ls', 'pole', 60)); + sc_opanl{i,2} = sc_opanl{i,1}./tf; + sc_opanl{i,3} = degvar(sc_opanl{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_twenth{i,1} = cs2sc(gsha(shiftdim(PS_twenth(i,:,:)), 'ls', 'pole', 60)); + sc_twenth{i,2} = sc_twenth{i,1}./tf; + sc_twenth{i,3} = degvar(sc_twenth{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); + + sc_interim{i,1} = cs2sc(gsha(shiftdim(PS_interim(i,:,:)), 'ls', 'pole', 60)); + sc_interim{i,2} = sc_interim{i,1}./tf; + sc_interim{i,3} = degvar(sc_interim{i,2}, 60, 0, 'geoid', 0, 0, 'grace'); +end + +cd /media/storage/Analysis + +save sc_twenth2007.mat sc_twenth +save sc_opanl2007.mat sc_opanl +save sc_interim2007.mat sc_interim + +clear all + + + + + diff --git a/compute_zonal_prec_glob.m b/compute_zonal_prec_glob.m new file mode 100644 index 0000000..8c981c7 --- /dev/null +++ b/compute_zonal_prec_glob.m @@ -0,0 +1,34 @@ +% function [] = compute_zonal_prec_glob + + +load /media/storage/Data/Precipitation/GPCP/GPCP_PRECv2.1.mat + +gpcp_totl = comp_zon_prec(gpcp_prec, [1979 2008], 0); +gpcp_land = comp_zon_prec(gpcp_prec, [1979 2008], 3); +gpcp_ocen = comp_zon_prec(gpcp_prec, [1979 2008], 2); + +clear gpcp_prec + +load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat + +ecmwf_totl = comp_zon_prec(ecmwf_prec, [1989 2009], 0); +ecmwf_land = comp_zon_prec(ecmwf_prec, [1989 2009], 1); +ecmwf_ocen = comp_zon_prec(ecmwf_prec, [1989 2009], 2); + +clear ecmwf_prec + +load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat + +merra_totl = comp_zon_prec(merra_prec, [1979 2009], 0); +merra_land = comp_zon_prec(merra_prec, [1979 2009], 1); +merra_ocen = comp_zon_prec(merra_prec, [1979 2009], 2); + +clear merra_prec + +load /media/storage/Data/Precipitation/CFSR/CFSR_PREC01.mat + +cfsr_totl = comp_zon_prec(cfsr_prec1, [1979 2009], 0); +cfsr_land = comp_zon_prec(cfsr_prec1, [1979 2009], 1); +cfsr_ocen = comp_zon_prec(cfsr_prec1, [1979 2009], 2); + +clear cfsr_prec1 \ No newline at end of file diff --git a/condcopula.m b/condcopula.m new file mode 100644 index 0000000..f744cd3 --- /dev/null +++ b/condcopula.m @@ -0,0 +1,11 @@ +function [dC u v theta] = condcopula(Family, var) + + +[C u v theta] = copulafun(Family); + + +if strcmp(var, 'u') + dC = diff(C, u); +elseif strcmp(var, 'v') + dC = diff(C, v); +end \ No newline at end of file diff --git a/contourplots.m b/contourplots.m new file mode 100644 index 0000000..0beaaf0 --- /dev/null +++ b/contourplots.m @@ -0,0 +1,348 @@ +% First we do the CFSR-Stuff +mnth = 1; +yr = 1989; + +file1 = '/media/storage/Data/Mflux/CFSR/original/CFSR.VQ.1979-2009.nc'; +file2 = '/media/storage/Data/Mflux/CFSR/original/CFSR.UQ.1979-2009.nc'; + +for i = 121:372 + cfsr_flx{i-120,3} = nj_varget(file1, 'VQ', [i 1 1], [1 inf inf]); + cfsr_flx{i-120,4} = nj_varget(file2, 'UQ', [i 1 1], [1 inf inf]); + cfsr_flx{i-120,1} = mnth; + cfsr_flx{i-120,2} = yr; + + mnth = mnth + 1; + + if 
mnth == 13 + mnth = 1; + yr = yr + 1; + end +end + +lon = nj_varget(file1, 'lon'); +lon_units = nj_attget(file1, 'lon', 'units'); +lon_name = nj_attget(file1, 'lon', 'long_name'); +lon_axis = nj_attget(file1, 'lon', '_CoordinateAxisType'); + +lat = nj_varget(file1, 'lat'); +lat_units = nj_attget(file1, 'lat', 'units'); +lat_name = nj_attget(file1, 'lat', 'long_name'); +lat_axis = nj_attget(file1, 'lat', '_CoordinateAxisType'); + + +% Now we compute the seasonal mean of the CFSR-fluxes +cfsr_v = comp_spat_mean(cfsr_flx, [1989, 2006], 'seasonal_1', [1 2 3], -9999); +cfsr_u = comp_spat_mean(cfsr_flx, [1989, 2006], 'seasonal_1', [1 2 4], -9999); +cfsr_v_ann = comp_spat_mean(cfsr_flx, [1989, 2006], 'annual_1', [1 2 3], -9999); +cfsr_u_ann = comp_spat_mean(cfsr_flx, [1989, 2006], 'annual_1', [1 2 4], -9999); + +% Finally we compute the contour levels (i.e. the gradient) +for i = 1:4 + cfsr_c{i} = abs(cfsr_v{i} + cfsr_u{i}); +end + +savename{1} = 'cfsr_uq_jfm.nc'; +savename{2} = 'cfsr_vq_jfm.nc'; +savename{3} = 'cfsr_uq_jas.nc'; +savename{4} = 'cfsr_vq_jas.nc'; +savename{5} = 'cfsr_c_jfm.nc'; +savename{6} = 'cfsr_c_jas.nc'; +savename{7} = 'cfsr_uq_ann.nc'; +savename{8} = 'cfsr_vq_ann.nc'; + + +data{1} = cfsr_u{1}; +data{2} = cfsr_v{1}; +data{3} = cfsr_u{3}; +data{4} = cfsr_v{3}; +data{5} = cfsr_c{1}; +data{6} = cfsr_c{3}; +data{7} = cfsr_u_ann; +data{8} = cfsr_v_ann; + +varname{1} = 'UQ_JFM'; +varname{2} = 'VQ_JFM'; +varname{3} = 'UQ_JAS'; +varname{4} = 'VQ_JAS'; +varname{5} = 'C_JFM'; +varname{6} = 'C_JAS'; +varname{7} = 'UQ_ANN'; +varname{8} = 'VQ_ANN'; + +for i = 1:8 + ncid = netcdf.create(savename{i}, 'NC_WRITE'); + lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); + lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + + lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); + lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + + data_var_id = netcdf.defVar(ncid, varname{i}, 'double', [lon_dim_id lat_dim_id]); + netcdf.endDef(ncid); + + netcdf.putVar(ncid, lon_var_id, lon); + netcdf.putVar(ncid, lat_var_id, lat); + netcdf.putVar(ncid, data_var_id, data{i}'); + + netcdf.reDef(ncid) + + netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); + netcdf.putAtt(ncid, lon_var_id, 'long_name', lon_name); + netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); + + + netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); + netcdf.putAtt(ncid, lat_var_id, 'long_name', lat_name); + netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); + netcdf.close(ncid); +end + + +% Alright... Let's go for MERRA +clear all +clc + + +mnth = 1; +yr = 1989; + +for i = 121:372 + if yr < 1993 + stream = num2str(100); + elseif yr >= 1993 & yr < 2001 + stream = num2str(200); + elseif yr >= 2001 + stream = num2str(300); + end + + if mnth < 10 + txtmnth = ['0', num2str(mnth)]; + else + txtmnth = num2str(mnth); + end + + fname = ['/media/storage/Data/Mflux/MERRA/original/MERRA', stream, ... 
+ '.prod.assim.tavgM_2d_int_Nx.', num2str(yr), txtmnth, '.SUB.nc']; + + merra_flx{i-120,3} = nj_varget(fname, 'vflxqv'); + merra_flx{i-120,4} = nj_varget(fname, 'uflxqv'); + + merra_flx{i-120,1} = mnth; + merra_flx{i-120,2} = yr; + + mnth = mnth + 1; + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + +end + + +lon = nj_varget(fname, 'longitude'); +lon_units = nj_attget(fname, 'longitude', 'units'); +lon_name = nj_attget(fname, 'longitude', 'long_name'); +lon_axis = nj_attget(fname, 'longitude', '_CoordinateAxisType'); + +lat = nj_varget(fname, 'latitude'); +lat_units = nj_attget(fname, 'latitude', 'units'); +lat_name = nj_attget(fname, 'latitude', 'long_name'); +lat_axis = nj_attget(fname, 'latitude', '_CoordinateAxisType'); + + +merra_v = comp_spat_mean(merra_flx, [1989, 2006], 'seasonal_1', [1 2 3], -9999); +merra_u = comp_spat_mean(merra_flx, [1989, 2006], 'seasonal_1', [1 2 4], -9999); + + +merra_v_ann = comp_spat_mean(merra_flx, [1989, 2006], 'annual_1', [1 2 3], -9999); +merra_u_ann = comp_spat_mean(merra_flx, [1989, 2006], 'annual_1', [1 2 4], -9999); + +% Finally we compute the contour levels (i.e. the gradient) +for i = 1:4 + merra_c{i} = abs(merra_v{i} + merra_u{i}); +end + + + +savename{1} = 'merra_uq_jfm.nc'; +savename{2} = 'merra_vq_jfm.nc'; +savename{3} = 'merra_uq_jas.nc'; +savename{4} = 'merra_vq_jas.nc'; +savename{5} = 'merra_c_jfm.nc'; +savename{6} = 'merra_c_jas.nc'; +savename{7} = 'merra_uq_ann.nc'; +savename{8} = 'merra_vq_ann.nc'; + +data{1} = merra_u{1}; +data{2} = merra_v{1}; +data{3} = merra_u{3}; +data{4} = merra_v{3}; +data{5} = merra_c{1}; +data{6} = merra_c{3}; +data{7} = merra_u_ann; +data{8} = merra_v_ann; + +varname{1} = 'UQ_JFM'; +varname{2} = 'VQ_JFM'; +varname{3} = 'UQ_JAS'; +varname{4} = 'VQ_JAS'; +varname{5} = 'C_JFM'; +varname{6} = 'C_JAS'; +varname{7} = 'UQ_ANN'; +varname{8} = 'VQ_ANN'; + + +for i = 1:8 + ncid = netcdf.create(savename{i}, 'NC_WRITE'); + lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); + lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + + lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); + lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + + data_var_id = netcdf.defVar(ncid, varname{i}, 'double', [lon_dim_id lat_dim_id]); + netcdf.endDef(ncid); + + netcdf.putVar(ncid, lon_var_id, lon); + netcdf.putVar(ncid, lat_var_id, lat); + netcdf.putVar(ncid, data_var_id, data{i}'); + + netcdf.reDef(ncid) + + netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); + netcdf.putAtt(ncid, lon_var_id, 'long_name', lon_name); + netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); + + + netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); + netcdf.putAtt(ncid, lat_var_id, 'long_name', lat_name); + netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); + netcdf.close(ncid); +end + +clear all + +% +% AND FINALLY... 
DA ECMWF SHIT +clear all + +mnth = 1; +yr = 1989; + +fnme = '/media/storage/Data/Mflux/ECMWF/original/output.grib'; + +for i = 1:263 + ecmwf_flx{i,3} = nj_varget(fnme, 'Vertical_integral_of_northward_water_vapour_flux', [i 1 1], [1 inf inf]); + ecmwf_flx{i,4} = nj_varget(fnme, 'Vertical_integral_of_eastward_water_vapour_flux', [i 1 1], [1 inf inf]); + + ecmwf_flx{i,1} = mnth; + ecmwf_flx{i,2} = yr; + + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end +varname{7} = 'UQ_ANN'; +varname{8} = 'VQ_ANN'; + +end + +lon = nj_varget(fnme, 'lon'); +lon_units = nj_attget(fnme, 'lon', 'units'); +lon_name = nj_attget(fnme, 'lon', 'long_name'); +lon_axis = nj_attget(fnme, 'lon', '_CoordinateAxisType'); + +lat = nj_varget(fnme, 'lat'); +lat_units = nj_attget(fnme, 'lat', 'units'); +lat_name = nj_attget(fnme, 'lat', 'long_name'); +lat_axis = nj_attget(fnme, 'lat', '_CoordinateAxisType'); + + +% Now we compute the seasonal mean of the CFSR-fluxes +ecmwf_v = comp_spat_mean(ecmwf_flx, [1989, 2006], 'seasonal_1', [1 2 3], -9999); +ecmwf_u = comp_spat_mean(ecmwf_flx, [1989, 2006], 'seasonal_1', [1 2 4], -9999); + +ecmwf_v_ann = comp_spat_mean(ecmwf_flx, [1989, 2006], 'annual_1', [1 2 3], -9999); +ecmwf_u_ann = comp_spat_mean(ecmwf_flx, [1989, 2006], 'annual_1', [1 2 4], -9999); +% Finally we compute the contour levels (i.e. the gradient) +for i = 1:4 + ecmwf_c{i} = abs(ecmwf_v{i} + ecmwf_u{i}); +end + +savename{1} = 'ecmwf_uq_jfm.nc'; +savename{2} = 'ecmwf_vq_jfm.nc'; +savename{3} = 'ecmwf_uq_jas.nc'; +savename{4} = 'ecmwf_vq_jas.nc'; +savename{5} = 'ecmwf_c_jfm.nc'; +savename{6} = 'ecmwf_c_jas.nc'; +savename{7} = 'ecmwf_uq_ann.nc'; +savename{8} = 'ecmwf_vq_ann.nc'; + +data{1} = ecmwf_u{1}; +data{2} = ecmwf_v{1}; +data{3} = ecmwf_u{3}; +data{4} = ecmwf_v{3}; +data{5} = ecmwf_c{1}; +data{6} = ecmwf_c{3}; +data{7} = ecmwf_u_ann; +data{8} = ecmwf_v_ann; + +varname{1} = 'UQ_JFM'; +varname{2} = 'VQ_JFM'; +varname{3} = 'UQ_JAS'; +varname{4} = 'VQ_JAS'; +varname{5} = 'C_JFM'; +varname{6} = 'C_JAS'; +varname{7} = 'UQ_ANN'; +varname{8} = 'VQ_ANN'; + + +for i = 1:8 + ncid = netcdf.create(savename{i}, 'NC_WRITE'); + lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); + lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + + lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); + lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + + data_var_id = netcdf.defVar(ncid, varname{i}, 'double', [lon_dim_id lat_dim_id]); + netcdf.endDef(ncid); + + netcdf.putVar(ncid, lon_var_id, lon); + netcdf.putVar(ncid, lat_var_id, lat); + netcdf.putVar(ncid, data_var_id, data{i}'); + + netcdf.reDef(ncid) + + netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); + netcdf.putAtt(ncid, lon_var_id, 'long_name', lon_name); + netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); + + + netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); + netcdf.putAtt(ncid, lat_var_id, 'long_name', lat_name); + netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); + netcdf.close(ncid); +end +clear all + +load /media/storage/Data/Mflux/CFSR/CFSR_VIMFD.mat +cf = comp_spat_mean(cfsr_vimfd, [1989 2006], 'annual_1', [4 5 9], -9999); +A = grid2gmt(-cf, 0.5); +save cfsr_vimfc_ann.txt A -ascii + +load /media/storage/Data/Mflux/ECMWF/ECMWF_VIMFD.mat +cf = comp_spat_mean(ecmwf_vimfd, [1989 2006], 'annual_1', [4 5 9], -9999); +A = grid2gmt(-cf, 0.5); +save ecmwf_vimfc_ann.txt A -ascii + +load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat +cf = comp_spat_mean(merra_vimfd, [1989 2006], 
'annual_1', [4 5 9], -9999); +A = grid2gmt(-cf, 0.5); +save merra_vimfc_ann.txt A -ascii + + + +% Alright... Let's go for MERRA \ No newline at end of file diff --git a/copulafun.m b/copulafun.m new file mode 100644 index 0000000..84bd55e --- /dev/null +++ b/copulafun.m @@ -0,0 +1,11 @@ +function [C u v theta] = copulafun(Family) + +syms u v theta + +if strcmp(Family, 'Gumbel') + C = exp(-((-log(u))^theta + (-log(v))^theta)^(1/theta)); +elseif strcmp(Family, 'Clayton') + C = (u^-theta + v^-theta - 1)^(-1/theta); +elseif strcmp(Family, 'Frank') + C = -(1/theta)*log(1 + ((exp(-theta*u)-1)*(exp(-theta*v)-1))/(exp(-theta) - 1)); +end diff --git a/correct_corr.m b/correct_corr.m new file mode 100644 index 0000000..f9f837a --- /dev/null +++ b/correct_corr.m @@ -0,0 +1,14 @@ +function Cc = correct_corr(C); + +[V, D] = eig(C); + +D = max(D, 0); + +T = diag(1./(V.^2 * diag(D))); + +B = V*sqrt(D); + +B = sqrt(T)*B; + +Cc = B*B'; +% keyboard \ No newline at end of file diff --git a/correlate.m b/correlate.m new file mode 100644 index 0000000..b52f037 --- /dev/null +++ b/correlate.m @@ -0,0 +1,128 @@ +function [c,cfb,icfb,pc,pcfb,ipcfb] = correlate(in1,in2,maxlag,typ,pord,damp,flag) + +% CORRELATE calculates the cross-covariance or correlation between two +% input signals. Auto-correlation is handled as a special case of a single +% input. The calculation is similar to XCORR but this one is able to handle +% data gaps. +% +% The two input vectors can have different length. The shorter one is +% zero-padded to the length of the longer one. +% +% For auto correlation/covariance use either the same input twice or call +% the function with an emtpy matrix for the second input signal. +% +% How: [c,cfb,icfb,pc,pcfb,ipcfb] = correlate(in1,in2,maxlag,typ,pord,damp,flag) +% +% Input: in1 [n,1] input signal 1 +% in2 [m,1] input signal 2 +% maxlag [1,1] maximum lag +% typ [str] 'biased' - output is (biased) covariance +% 'unbiased' - output is (unbiased) covariance +% 'corr' - output is the (biased) correlation (default) +% 'unbcorr' - output is the (unbiased) correlation +% pord [1,1] maximum lag for partial autocovariance +% damp [1,1] damping factor for autocorrelation +% flag [1,1] flag for substraction of the mean (def:=true) +% +% Output: out [k,1] coefficients (k = max(n,m)) +% +% Weigelt, GI Stuttgart 27.04.11 + + +%% Input Check +error(nargchk(1,7,nargin)) +if ~isvector(in1), error('Input 1 must be a vector'); end +if nargin < 7 || isempty(flag), flag = true; end +if nargin < 6 || isempty(damp), damp = []; end +if nargin < 5 || isempty(pord), pord = []; end +if nargin < 4 || isempty(typ), typ = 'corr'; end +if nargin < 3 || isempty(maxlag), maxlag = fix(numel(in1)/10); end +if nargin < 2 || isempty(in2), in2 = in1; end +if ~isvector(in2), error('Input 2 must be a vector'); end + +%% Preparation +if isempty(maxlag), + maxlag = numel(in1); +elseif maxlag > numel(in1) + warning('Matlab:Correlate:MaxLagToBig','MAXLAG is larger than the number of elements. 
It is reduced to the latter.')
+    maxlag = numel(in1);
+end
+
+% Subtract the mean before we do anything
+in1 = in1(:);
+in2 = in2(:);
+if flag,
+    %detrend the data with a robustfit: dataset 1
+    ti = (1:size(in1,1))';
+    b = robustfit(ti,in1);
+    in1 = in1 - (b(1) + b(2).*ti);
+    %detrend the data with a robustfit: dataset 2
+    ti = (1:size(in2,1))';
+    b = robustfit(ti,in2);
+    in2 = in2 - (b(1) + b(2).*ti);
+    clear b ti
+end
+
+%% Calculation of the autocovariance function
+log_in1 = double(~isnan(in1)); in1(isnan(in1)) = 0;
+log_in2 = double(~isnan(in2)); in2(isnan(in2)) = 0;
+Sout = xcorr(in1,in2,maxlag);
+Sout = Sout(maxlag+1:end-1);
+if strcmp(typ,'biased')
+    c = Sout./numel(in1);
+elseif strcmp(typ,'unbiased')
+    Nout = round(xcorr(log_in1,log_in2,maxlag));
+    Nout = Nout(maxlag+1:end-1);
+    c = Sout./Nout;
+elseif strcmp(typ,'corr'),
+    c = Sout./numel(in1);
+    c = c./c(1);
+elseif strcmp(typ,'unbcorr'),
+    Nout = round(xcorr(log_in1,log_in2,maxlag));
+    Nout = Nout(maxlag+1:end-1);
+    c = Sout./Nout;
+    c = c./c(1);
+end
+if ~isempty(damp)
+    gwin = gausswin(2*numel(c)-1,damp);
+    c = c.*gwin(numel(c):end);
+end
+cfb = c(1).*1.96/sqrt(numel(in1));
+icfb = 1:find(abs(c) > cfb,1,'last');
+
+%% Calculation of the partial autocovariance
+if nargout > 3,
+    if isempty(pord),
+        pord = maxlag - 1;
+    elseif pord > maxlag,
+        warning('Matlab:Correlate:PordToBig','PORD cannot be larger than maxlag. It is reduced to the latter.')
+        pord = maxlag - 1;
+    end
+    pc = zeros(pord,1);
+    acf = c./c(1);
+    pc(1) = c(1);
+    for idx = 2:pord+1
+        r = c(2:idx);
+        col = acf(1:idx-1);
+        R = toeplitz(col,col');
+        hpc = R\r;
+        pc(idx) = hpc(end);
+    end
+    if ~isempty(damp)
+        gwin = gausswin(2*numel(pc)-1,damp);
+        pc = pc.*gwin(numel(pc):end);
+    end
+    pcfb = pc(1).*1.96/sqrt(numel(in1));
+    ipcfb = 1:find(abs(pc) > pcfb,1,'last');
+end
+
+
+
+
+%% ------------------------------------------------------------------------
+% uses
+% m-files:
+%
+% revision history
+%
+% remarks
diff --git a/corrmaps.m b/corrmaps.m
new file mode 100644
index 0000000..92a8399
--- /dev/null
+++ b/corrmaps.m
@@ -0,0 +1,24 @@
+function R = corrmaps(ref, evl, dim)
+
+[n, p] = size(ref{1});
+
+
+if strcmp(dim, 'time')
+
+    for i = 1:length(ref)
+        F_ref(i,:) = ref{i}(:)';
+        F_evl(i,:) = evl{i}(:)';
+    end
+
+elseif strcmp(dim, 'space')
+
+    for i = 1:length(ref)
+        F_ref(:,i) = ref{i}(:);
+        F_evl(:,i) = evl{i}(:);
+    end
+
+end
+
+
+R = corrcoef(F_ref, F_evl);
+
diff --git a/corrmaps_eof.m b/corrmaps_eof.m
new file mode 100644
index 0000000..9a43787
--- /dev/null
+++ b/corrmaps_eof.m
@@ -0,0 +1,56 @@
+function maps = corrmaps_eof(data, pcs, lams, method, mnflg)
+
+if nargin < 4
+    method = 'corr';
+end
+
+if nargin < 5
+    mnflg = true;
+end
+
+if iscell(data)
+    [rws, cls] = size(data{1});
+    for i = 1:length(data)
+        F(i,:) = data{i}(:)';
+    end
+
+else
+    F = data;
+end
+
+[n_F, p_F] = size(F);
+[n_P, p_P] = size(pcs);
+
+
+if mnflg == true
+    F = F - ones(n_F,1)*mean(F);
+end
+
+if n_F ~= n_P
+    error('Data and PCs must have the same number of time-steps')
+end
+
+if strcmp(method, 'corr')
+    F_n = F./(ones(n_F,1)*sqrt(var(F)));
+    PC_n = pcs./(ones(n_P,1)*sqrt(var(pcs)));
+
+    map_mtrx = 1/n_F*(F_n'*PC_n);
+
+elseif strcmp(method, 'cov')
+
+    map_mtrx = 1/n_F*(F'*pcs);
+
+end
+
+
+if iscell(data)
+    % reshape each column of the map matrix back to the original grid
+    for i = 1:size(map_mtrx,2)
+        maps{i,1} = reshape(map_mtrx(:,i), rws, cls);
+    end
+else
+    maps = map_mtrx;
+end
+
+
+
\ No newline at end of file
diff --git a/cov2corr.m b/cov2corr.m
new file mode 100644
index 0000000..5894d2b
--- /dev/null
+++ b/cov2corr.m
@@ -0,0
+1,12 @@ +function C = cov2corr(Q) +% The function transforms a covariance matrix Q in a correlation matrix C +% by dividing each matrix element by the correct product of the main +% diagonal elements +[r, c] = size(Q); + +for i = 1:r + for j = 1:c + C(i, j) = Q(i, j)/(sqrt(Q(i,i))*sqrt(Q(j,j))); + end +end +keyboard \ No newline at end of file diff --git a/covloc5thorder.m b/covloc5thorder.m new file mode 100644 index 0000000..796fe2d --- /dev/null +++ b/covloc5thorder.m @@ -0,0 +1,45 @@ + +function [D, Rho] = covloc5thorder(long, lat, l) + + +R = 6371; +rho = pi/180; +c = sqrt(10/3)*l; + + +long = long*rho; +lat = lat*rho; + +for i = 1:length(long) + for j = 1:length(long) + dphi = abs(lat(i) - lat(j)); + dlam = abs(long(i) - long(j)); + + tmp = sin(dphi/2)^2 + cos(lat(i))*cos(lat(j)).*sin(dlam/2)^2; + tmp = 2*atan2(sqrt(tmp), sqrt(1-tmp)); + D(i,j) = R*tmp; + + b = D(i,j)/c; + + if D(i,j) >= 0 && D(i,j) <= c + Rho(i,j) = -1/4*b^5 + 1/2*b^4 + 5/8*b^3 - 5/3*b^2 + 1; + elseif D(i,j) > c && D(i,j) <= 2*c + Rho(i,j) = 1/12*b^5 - 1/2*b^4 + 5/8*b^3 + 5/3*b^2 -5*b + 4 - 2/3*(1/b); + elseif D(i,j) > 2*c + Rho(i,j) = 0; + end + + + + end +end + + + + +% +% a = sin(dlat/2)^2 + cos(theta1)*cos(theta2)*sin(dlambda/2)^2; +% c = 2*atan2(sqrt(a), sqrt(1-a)); +% s = R*c; + + diff --git a/crt_corr_ts.m b/crt_corr_ts.m new file mode 100644 index 0000000..c9b2e5d --- /dev/null +++ b/crt_corr_ts.m @@ -0,0 +1,529 @@ +% load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +% gpcc_glob_ann = comp_glob_quant(gpcc_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% gpcc_glob_mon = comp_glob_quant(gpcc_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% gpcc_nhsh_ann = comp_glob_quant(gpcc_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% gpcc_nhsh_mon = comp_glob_quant(gpcc_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% gpcc_trpc_ann = comp_glob_quant(gpcc_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% gpcc_trpc_mon = comp_glob_quant(gpcc_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% gpcc_cont_ann = comp_cont_quant(gpcc_prec, [1989 2006], 'annual', [7 6 3 8 9 1], [4 5 9], -9999, 1); +% gpcc_cont_mon = comp_cont_quant(gpcc_prec, [1989 2006], 'monthly', [7 6 3 8 9 1], [4 5 9], -9999, 1); +% clear gpcc_prec +% +% +% gpcc_ann(:,1) = gpcc_glob_ann(:,2); +% gpcc_ann(:,2) = gpcc_nhsh_ann(:,2); +% gpcc_ann(:,3) = gpcc_nhsh_ann(:,3); +% gpcc_ann(:,4) = gpcc_cont_ann(:,2); +% gpcc_ann(:,5) = gpcc_cont_ann(:,3); +% gpcc_ann(:,6) = gpcc_cont_ann(:,4); +% gpcc_ann(:,7) = gpcc_cont_ann(:,5); +% gpcc_ann(:,8) = gpcc_cont_ann(:,6); +% gpcc_ann(:,9) = gpcc_cont_ann(:,7); +% gpcc_ann(:,10) = gpcc_trpc_ann(:,2); +% +% gpcc_mon(:,1) = gpcc_glob_mon(:,2); +% gpcc_mon(:,2) = gpcc_nhsh_mon(:,2); +% gpcc_mon(:,3) = gpcc_nhsh_mon(:,3); +% gpcc_mon(:,4) = gpcc_cont_mon(:,2); +% gpcc_mon(:,5) = gpcc_cont_mon(:,3); +% gpcc_mon(:,6) = gpcc_cont_mon(:,4); +% gpcc_mon(:,7) = gpcc_cont_mon(:,5); +% gpcc_mon(:,8) = gpcc_cont_mon(:,6); +% gpcc_mon(:,9) = gpcc_cont_mon(:,7); +% gpcc_mon(:,10) = gpcc_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/GPCP/GPCP_PRECv2.1.mat +% gpcp_glob_ann = comp_glob_quant(gpcp_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% gpcp_glob_mon = comp_glob_quant(gpcp_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% gpcp_nhsh_ann = comp_glob_quant(gpcp_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% gpcp_nhsh_mon = comp_glob_quant(gpcp_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% gpcp_trpc_ann = 
comp_glob_quant(gpcp_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% gpcp_trpc_mon = comp_glob_quant(gpcp_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% gpcp_cont_ann = comp_cont_quant(gpcp_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% gpcp_cont_mon = comp_cont_quant(gpcp_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear gpcp_prec +% +% gpcp_ann(:,1) = gpcp_glob_ann(:,2); +% gpcp_ann(:,2) = gpcp_nhsh_ann(:,2); +% gpcp_ann(:,3) = gpcp_nhsh_ann(:,3); +% gpcp_ann(:,4) = gpcp_cont_ann(:,2); +% gpcp_ann(:,5) = gpcp_cont_ann(:,3); +% gpcp_ann(:,6) = gpcp_cont_ann(:,4); +% gpcp_ann(:,7) = gpcp_cont_ann(:,5); +% gpcp_ann(:,8) = gpcp_cont_ann(:,6); +% gpcp_ann(:,9) = gpcp_cont_ann(:,7); +% gpcp_ann(:,10) = gpcp_trpc_ann(:,2); +% +% gpcp_mon(:,1) = gpcp_glob_mon(:,2); +% gpcp_mon(:,2) = gpcp_nhsh_mon(:,2); +% gpcp_mon(:,3) = gpcp_nhsh_mon(:,3); +% gpcp_mon(:,4) = gpcp_cont_mon(:,2); +% gpcp_mon(:,5) = gpcp_cont_mon(:,3); +% gpcp_mon(:,6) = gpcp_cont_mon(:,4); +% gpcp_mon(:,7) = gpcp_cont_mon(:,5); +% gpcp_mon(:,8) = gpcp_cont_mon(:,6); +% gpcp_mon(:,9) = gpcp_cont_mon(:,7); +% gpcp_mon(:,10) = gpcp_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% +% +% load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +% cru_glob_ann = comp_glob_quant(cru_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% cru_glob_mon = comp_glob_quant(cru_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% cru_nhsh_ann = comp_glob_quant(cru_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% cru_nhsh_mon = comp_glob_quant(cru_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% cru_trpc_ann = comp_glob_quant(cru_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% cru_trpc_mon = comp_glob_quant(cru_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% cru_cont_ann = comp_cont_quant(cru_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% cru_cont_mon = comp_cont_quant(cru_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear cru_prec +% +% cru_ann(:,1) = cru_glob_ann(:,2); +% cru_ann(:,2) = cru_nhsh_ann(:,2); +% cru_ann(:,3) = cru_nhsh_ann(:,3); +% cru_ann(:,4) = cru_cont_ann(:,2); +% cru_ann(:,5) = cru_cont_ann(:,3); +% cru_ann(:,6) = cru_cont_ann(:,4); +% cru_ann(:,7) = cru_cont_ann(:,5); +% cru_ann(:,8) = cru_cont_ann(:,6); +% cru_ann(:,9) = cru_cont_ann(:,7); +% cru_ann(:,10) = cru_trpc_ann(:,2); +% +% cru_mon(:,1) = cru_glob_mon(:,2); +% cru_mon(:,2) = cru_nhsh_mon(:,2); +% cru_mon(:,3) = cru_nhsh_mon(:,3); +% cru_mon(:,4) = cru_cont_mon(:,2); +% cru_mon(:,5) = cru_cont_mon(:,3); +% cru_mon(:,6) = cru_cont_mon(:,4); +% cru_mon(:,7) = cru_cont_mon(:,5); +% cru_mon(:,8) = cru_cont_mon(:,6); +% cru_mon(:,9) = cru_cont_mon(:,7); +% cru_mon(:,10) = cru_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% +% load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +% cpc_glob_ann = comp_glob_quant(cpc_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% cpc_glob_mon = comp_glob_quant(cpc_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% cpc_nhsh_ann = comp_glob_quant(cpc_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% cpc_nhsh_mon = comp_glob_quant(cpc_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% cpc_trpc_ann = comp_glob_quant(cpc_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% cpc_trpc_mon = comp_glob_quant(cpc_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% cpc_cont_ann = comp_cont_quant(cpc_prec, [1989 2006], 'annual',[7 6 3 8 9 1], [4 5 9], -9999, 1); +% 
cpc_cont_mon = comp_cont_quant(cpc_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear cpc_prec +% +% +% cpc_ann(:,1) = cpc_glob_ann(:,2); +% cpc_ann(:,2) = cpc_nhsh_ann(:,2); +% cpc_ann(:,3) = cpc_nhsh_ann(:,3); +% cpc_ann(:,4) = cpc_cont_ann(:,2); +% cpc_ann(:,5) = cpc_cont_ann(:,3); +% cpc_ann(:,6) = cpc_cont_ann(:,4); +% cpc_ann(:,7) = cpc_cont_ann(:,5); +% cpc_ann(:,8) = cpc_cont_ann(:,6); +% cpc_ann(:,9) = cpc_cont_ann(:,7); +% cpc_ann(:,10) = cpc_trpc_ann(:,2); +% +% cpc_mon(:,1) = cpc_glob_mon(:,2); +% cpc_mon(:,2) = cpc_nhsh_mon(:,2); +% cpc_mon(:,3) = cpc_nhsh_mon(:,3); +% cpc_mon(:,4) = cpc_cont_mon(:,2); +% cpc_mon(:,5) = cpc_cont_mon(:,3); +% cpc_mon(:,6) = cpc_cont_mon(:,4); +% cpc_mon(:,7) = cpc_cont_mon(:,5); +% cpc_mon(:,8) = cpc_cont_mon(:,6); +% cpc_mon(:,9) = cpc_cont_mon(:,7); +% cpc_mon(:,10) = cpc_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% ecmwf_glob_ann = comp_glob_quant(ecmwf_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% ecmwf_glob_mon = comp_glob_quant(ecmwf_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% ecmwf_nhsh_ann = comp_glob_quant(ecmwf_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% ecmwf_nhsh_mon = comp_glob_quant(ecmwf_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% ecmwf_trpc_ann = comp_glob_quant(ecmwf_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% ecmwf_trpc_mon = comp_glob_quant(ecmwf_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% ecmwf_cont_ann = comp_cont_quant(ecmwf_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% ecmwf_cont_mon = comp_cont_quant(ecmwf_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear ecmwf_prec +% +% +% ecmwf_ann(:,1) = ecmwf_glob_ann(:,2); +% ecmwf_ann(:,2) = ecmwf_nhsh_ann(:,2); +% ecmwf_ann(:,3) = ecmwf_nhsh_ann(:,3); +% ecmwf_ann(:,4) = ecmwf_cont_ann(:,2); +% ecmwf_ann(:,5) = ecmwf_cont_ann(:,3); +% ecmwf_ann(:,6) = ecmwf_cont_ann(:,4); +% ecmwf_ann(:,7) = ecmwf_cont_ann(:,5); +% ecmwf_ann(:,8) = ecmwf_cont_ann(:,6); +% ecmwf_ann(:,9) = ecmwf_cont_ann(:,7); +% ecmwf_ann(:,10) = ecmwf_trpc_ann(:,2); +% +% ecmwf_mon(:,1) = ecmwf_glob_mon(:,2); +% ecmwf_mon(:,2) = ecmwf_nhsh_mon(:,2); +% ecmwf_mon(:,3) = ecmwf_nhsh_mon(:,3); +% ecmwf_mon(:,4) = ecmwf_cont_mon(:,2); +% ecmwf_mon(:,5) = ecmwf_cont_mon(:,3); +% ecmwf_mon(:,6) = ecmwf_cont_mon(:,4); +% ecmwf_mon(:,7) = ecmwf_cont_mon(:,5); +% ecmwf_mon(:,8) = ecmwf_cont_mon(:,6); +% ecmwf_mon(:,9) = ecmwf_cont_mon(:,7); +% ecmwf_mon(:,10) = ecmwf_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +% merra_glob_ann = comp_glob_quant(merra_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% merra_glob_mon = comp_glob_quant(merra_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% merra_nhsh_ann = comp_glob_quant(merra_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% merra_nhsh_mon = comp_glob_quant(merra_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% merra_trpc_ann = comp_glob_quant(merra_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% merra_trpc_mon = comp_glob_quant(merra_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% merra_cont_ann = comp_cont_quant(merra_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% merra_cont_mon = comp_cont_quant(merra_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear merra_prec +% +% +% merra_ann(:,1) = merra_glob_ann(:,2); +% 
merra_ann(:,2) = merra_nhsh_ann(:,2); +% merra_ann(:,3) = merra_nhsh_ann(:,3); +% merra_ann(:,4) = merra_cont_ann(:,2); +% merra_ann(:,5) = merra_cont_ann(:,3); +% merra_ann(:,6) = merra_cont_ann(:,4); +% merra_ann(:,7) = merra_cont_ann(:,5); +% merra_ann(:,8) = merra_cont_ann(:,6); +% merra_ann(:,9) = merra_cont_ann(:,7); +% merra_ann(:,10) = merra_trpc_ann(:,2); +% +% merra_mon(:,1) = merra_glob_mon(:,2); +% merra_mon(:,2) = merra_nhsh_mon(:,2); +% merra_mon(:,3) = merra_nhsh_mon(:,3); +% merra_mon(:,4) = merra_cont_mon(:,2); +% merra_mon(:,5) = merra_cont_mon(:,3); +% merra_mon(:,6) = merra_cont_mon(:,4); +% merra_mon(:,7) = merra_cont_mon(:,5); +% merra_mon(:,8) = merra_cont_mon(:,6); +% merra_mon(:,9) = merra_cont_mon(:,7); +% merra_mon(:,10) = merra_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +% cfsr_glob_ann = comp_glob_quant(cfsr_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% cfsr_glob_mon = comp_glob_quant(cfsr_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% cfsr_nhsh_ann = comp_glob_quant(cfsr_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% cfsr_nhsh_mon = comp_glob_quant(cfsr_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% cfsr_trpc_ann = comp_glob_quant(cfsr_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% cfsr_trpc_mon = comp_glob_quant(cfsr_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% cfsr_cont_ann = comp_cont_quant(cfsr_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% cfsr_cont_mon = comp_cont_quant(cfsr_prec, [1989 2006], 'monthly',[7 6 3 8 9 1], [4 5 9], -9999, 1); +% clear cfsr_prec +% +% +% cfsr_ann(:,1) = cfsr_glob_ann(:,2); +% cfsr_ann(:,2) = cfsr_nhsh_ann(:,2); +% cfsr_ann(:,3) = cfsr_nhsh_ann(:,3); +% cfsr_ann(:,4) = cfsr_cont_ann(:,2); +% cfsr_ann(:,5) = cfsr_cont_ann(:,3); +% cfsr_ann(:,6) = cfsr_cont_ann(:,4); +% cfsr_ann(:,7) = cfsr_cont_ann(:,5); +% cfsr_ann(:,8) = cfsr_cont_ann(:,6); +% cfsr_ann(:,9) = cfsr_cont_ann(:,7); +% cfsr_ann(:,10) = cfsr_trpc_ann(:,2); +% +% cfsr_mon(:,1) = cfsr_glob_mon(:,2); +% cfsr_mon(:,2) = cfsr_nhsh_mon(:,2); +% cfsr_mon(:,3) = cfsr_nhsh_mon(:,3); +% cfsr_mon(:,4) = cfsr_cont_mon(:,2); +% cfsr_mon(:,5) = cfsr_cont_mon(:,3); +% cfsr_mon(:,6) = cfsr_cont_mon(:,4); +% cfsr_mon(:,7) = cfsr_cont_mon(:,5); +% cfsr_mon(:,8) = cfsr_cont_mon(:,6); +% cfsr_mon(:,9) = cfsr_cont_mon(:,7); +% cfsr_mon(:,10) = cfsr_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* + + +save prec_timeseries.mat + + + +text_str{1} = 'Global land'; +text_str{2} = 'Northern hemisphere'; +text_str{3} = 'Southern hemisphere'; +text_str{4} = 'North America'; +text_str{5} = 'South America'; +text_str{6} = 'Europe'; +text_str{7} = 'Africa'; +text_str{8} = 'Asia'; +text_str{9} = 'Australia'; +text_str{10} = '15S - 15N (Tropics)'; + +yrs{1} = ' '; +yrs{2} = '1990'; +yrs{3} = ' '; +yrs{4} = '1992 '; +yrs{5} = ' '; +yrs{6} = '1994 '; +yrs{7} = ' '; +yrs{8} = '1996 '; +yrs{9} = ' '; +yrs{10} = '1998 '; +yrs{11} = ' '; +yrs{12} = '2000'; +yrs{13} = ' '; +yrs{14} = '2002 '; +yrs{15} = ' '; +yrs{16} = '2004 '; +yrs{17} = ' '; +yrs{18} = '2006 '; + +mnths{1} = 'J'; +mnths{2} = 'F'; +mnths{3} = 'M'; +mnths{4} = 'A'; +mnths{5} = 'M'; +mnths{6} = 'J'; +mnths{7} = 'J'; +mnths{8} = 'A'; +mnths{9} = 'S'; +mnths{10} = 'O'; +mnths{11} = 'N'; +mnths{12} = 'D'; + +fnames_ann{1} = 'P_ann_glob_ts'; +fnames_ann{2} = 'P_ann_NH_ts'; +fnames_ann{3} = 'P_ann_SH_ts'; +fnames_ann{4} = 'P_ann_NA_ts'; +fnames_ann{5} = 'P_ann_SA_ts'; +fnames_ann{6} = 'P_ann_E_ts'; 
+fnames_ann{7} = 'P_ann_AF_ts'; +fnames_ann{8} = 'P_ann_AS_ts'; +fnames_ann{9} = 'P_ann_AU_ts'; +fnames_ann{10} = 'P_ann_TR_ts'; + +fnames_mon{1} = 'P_mon_glob_ts'; +fnames_mon{2} = 'P_mon_NH_ts'; +fnames_mon{3} = 'P_mon_SH_ts'; +fnames_mon{4} = 'P_mon_NA_ts'; +fnames_mon{5} = 'P_mon_SA_ts'; +fnames_mon{6} = 'P_mon_E_ts'; +fnames_mon{7} = 'P_mon_AF_ts'; +fnames_mon{8} = 'P_mon_AS_ts'; +fnames_mon{9} = 'P_mon_AU_ts'; +fnames_mon{10} = 'P_mon_TR_ts'; + +for i = 1:10 + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + plot(cru_ann(:,i) - gpcc_ann(:,i), 'c', 'linewidth', 1.5); + hold on + plot(gpcp_ann(:,i) - gpcc_ann(:,i), 'm', 'linewidth', 1.5); + plot(cpc_ann(:,i) - gpcc_ann(:,i), 'y', 'linewidth', 1.5); + plot(ecmwf_ann(:,i) - gpcc_ann(:,i), 'b', 'linewidth', 1.5); + plot(merra_ann(:,i) - gpcc_ann(:,i), 'r', 'linewidth', 1.5); + plot(cfsr_ann(:,i) - gpcc_ann(:,i), 'g', 'linewidth', 1.5); + + grid on + axis([1 18 -1 1]); + set(gca, 'xtick', 1:1:18); + set(gca, 'ytick', -1:0.25:1); + set(gca, 'xticklabel', yrs, 'fontsize', 16); + pbaspect([17 8 1]); + + + text(2, 0.75, text_str{i} , 'fontsize', 20); +if i == 1 + leg = legend('GPCP', 'CRU', 'CPC', 'INTERIM', 'MERRA', 'CFSR', 'location','Best'); + set(leg, 'fontsize', 12) + keyboard + end + print(h, '-depsc2', fnames_ann{i}); + clear h + + + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + plot(cru_mon(:,i) - gpcc_mon(:,i), 'c', 'linewidth', 1.5); + hold on + plot(gpcp_mon(:,i) - gpcc_mon(:,i), 'm', 'linewidth', 1.5); + plot(cpc_mon(:,i) - gpcc_mon(:,i), 'y', 'linewidth', 1.5); + plot(ecmwf_mon(:,i) - gpcc_mon(:,i), 'b', 'linewidth', 1.5); + plot(merra_mon(:,i) - gpcc_mon(:,i), 'r', 'linewidth', 1.5); + plot(cfsr_mon(:,i) - gpcc_mon(:,i), 'g', 'linewidth', 1.5); + + grid on + axis([1 12 -1.5 1.5]); + set(gca, 'xtick', 1:1:12); + set(gca, 'ytick', -1.5:0.5:1.5); + set(gca, 'xticklabel', mnths, 'fontsize', 16); + pbaspect([12 6 1]); + + + text(2, 1, text_str{i} , 'fontsize', 20); + if i == 1 + leg = legend('GPCP', 'CRU', 'CPC', 'INTERIM', 'MERRA', 'CFSR', 'location','Best'); + set(leg, 'fontsize', 12) + keyboard + end + print(h, '-depsc2', fnames_mon{i}); + clear h + close all +end + + + + +R_cru = comp_spat_corr(gpcc_prec, cru_prec, [1989 2006], 'annual_2', 1, [4 5 9], -9999); +clear cru_prec + +load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +R_cpc = comp_spat_corr(gpcc_prec, cpc_prec, [1989 2006], 'annual_2', 1, [4 5 9], -9999); +clear cpc_prec + +load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +R_ecmwf = comp_spat_corr(gpcc_prec, ecmwf_prec, [1989 2006], 'annual_2', 1, [4 5 9], -9999); +clear ecmwf_prec + + +load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +R_merra = comp_spat_corr(gpcc_prec, merra_prec, [1989 2006], 'annual_2', 1, [4 5 9], -9999); +clear merra_prec + +load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +R_cfsr = comp_spat_corr(gpcc_prec, cfsr_prec, [1989 2006], 'annual_2', 1, [4 5 9], -9999); +clear cfsr_prec + + + + + + +cindx = [7 6 3 8 9 1]; + +% fname{1} = 'R_ann_NA.eps'; +% fname{2} = 'R_ann_SA.eps'; +% fname{3} = 'R_ann_E.eps'; +% fname{4} = 'R_ann_AF.eps'; +% fname{5} = 'R_ann_AS.eps'; +% fname{6} = 'R_ann_AU.eps'; + +% fname{1} = 'P_mnth_NA.eps'; +% fname{2} = 'P_mnth_SA.eps'; +% fname{3} = 'P_mnth_E.eps'; +% fname{4} = 'P_mnth_AF.eps'; +% fname{5} = 'P_mnth_AS.eps'; +% fname{6} = 'P_mnth_AU.eps'; +% +% mnths{1} = 'J'; +% mnths{2} = 'F'; +% mnths{3} = 'M'; +% mnths{4} = 'A'; +% mnths{5} = 'M'; +% mnths{6} = 'J'; +% mnths{7} = 'J'; +% mnths{8} = 'A'; 
+% mnths{9} = 'S'; +% mnths{10} = 'O'; +% mnths{11} = 'N'; +% mnths{12} = 'D'; +% +yrs{1} = ' '; +yrs{2} = '1990'; +yrs{3} = ' '; +yrs{4} = '1992 '; +yrs{5} = ' '; +yrs{6} = '1994 '; +yrs{7} = ' '; +yrs{8} = '1996 '; +yrs{9} = ' '; +yrs{10} = '1998 '; +yrs{11} = ' '; +yrs{12} = '2000'; +yrs{13} = ' '; +yrs{14} = '2002 '; +yrs{15} = ' '; +yrs{16} = '2004 '; +yrs{17} = ' '; +yrs{18} = '2006 '; + + +% for i = 1:6 +h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + +% plot(P_gpcc(:, cindx(i)), 'k', 'linewidth', 1.5); + +% plot(R_cru(:, cindx(i))-P_gpcc(:, cindx(i)), 'k--', 'linewidth', 1.5); +% hold on +% plot(R_cpc(:, cindx(i))-P_gpcc(:, cindx(i)), 'k-.', 'linewidth', 1.5); +% plot(R_ecmwf(:, cindx(i))-P_gpcc(:, cindx(i)), 'color',[0.4 0.4 0.4], 'linewidth', 1.5); +% plot(R_merra(:, cindx(i))-P_gpcc(:, cindx(i)), '--', 'color',[0.4 0.4 0.4], 'linewidth', 1.5); +% plot(P_cfsr(:, cindx(i))-P_gpcc(:, cindx(i)), '-.', 'color',[0.4 0.4 0.4], 'linewidth', 1.5); +% plot(R_cru(cindx(i), :), 'm', 'linewidth', 1.5); +% hold on +% plot(R_cpc(cindx(i), :), 'c', 'linewidth', 1.5); +% plot(R_ecmwf(cindx(i), :), 'b', 'linewidth', 1.5); +% plot(R_merra(cindx(i), :), 'r', 'linewidth', 1.5); +% plot(R_cfsr(cindx(i), :), 'g', 'linewidth', 1.5); +plot(R_cru(1, :), 'm', 'linewidth', 1.5); +hold on +plot(R_cpc(1, :), 'c', 'linewidth', 1.5); +plot(R_ecmwf(1, :), 'b', 'linewidth', 1.5); +plot(R_merra(1, :), 'r', 'linewidth', 1.5); +plot(R_cfsr(1, :), 'g', 'linewidth', 1.5); + +grid on +axis([1 12 0.6 1]); +% axis([1 18 0.5 1]); +% set(gca, 'ytick', 0.5:0.05:1); +set(gca, 'ytick', 0.6:0.05:1); +% set(gca, 'xtick', 1:1:12) +% set(gca, 'xticklabel', mnths, 'fontsize', 16) + +set(gca, 'xtick', 1:1:18) +set(gca, 'xticklabel', yrs, 'fontsize', 16) +% axis([1 18 -1 1]) +pbaspect([12 8 1]); +% pbaspect([17 10 1]); +if i == 1 + legend('CRU', 'CPC', 'INTERIM', 'MERRA', 'CFSR', 'location', ... 
+ 'southeast', 'fontsize', 16); +end +% if i == 1 || i == 4 +% ylabel('[mm/day]', 'fontsize', 16) +% end +if i == 1 + keyboard +end +print(h, '-depsc2', 'corr_mnth_mean.eps'); + +% end + + diff --git a/crt_eof_ts.m b/crt_eof_ts.m new file mode 100644 index 0000000..1d37e16 --- /dev/null +++ b/crt_eof_ts.m @@ -0,0 +1,24 @@ + +load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +[eofs_merra, pcs_merra, var_merra, rec_merra] = eof_ana(merra_prec, [1989 2006], 0, 1, -9999, [4 5 9], 0); +clear merra_prec + +load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +[eofs_cfsr, pcs_cfsr, var_cfsr, rec_cfsr] = eof_ana(cfsr_prec, [1989 2006], 0, 1, -9999, [4 5 9], 0); +clear cfsr_prec + +load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +[eofs_gpcc, pcs_gpcc, var_gpcc, rec_gpcc] = eof_ana(gpcc_prec, [1989 2006], 0, 1, -9999, [4 5 9], 0); +clear gpcc_prec + +load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +[eofs_cru, pcs_cru, var_cru, rec_cru] = eof_ana(cru_prec, [1989 2006], 0, 1, -9999, [4 5 9], 0); +clear cru_prec + +load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +[eofs_cpc, pcs_cpc, var_cpc, rec_cpc] = eof_ana(cpc_prec, [1989 2006], 0, 1, -9999, [4 5 9], 0); +clear cru_prec + +load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +[eofs_ecmwf, pcs_ecmwf, var_ecmwf, rec_ecmwf] = eof_ana(ecmwf_prec, [1989 2006], 0, 1, -9999, [4 5 9], 0); +clear ecmwf_prec \ No newline at end of file diff --git a/crt_euro_ts.m b/crt_euro_ts.m new file mode 100644 index 0000000..b46cbf8 --- /dev/null +++ b/crt_euro_ts.m @@ -0,0 +1,406 @@ +merra_euro{1,1} = zeros(101,232); +merra_euro{2,1} = zeros(101,232); +merra_euro{3,1} = zeros(101,232); +merra_euro{4,1} = zeros(101,232); +merra_euro{5,1} = zeros(101,232); +merra_euro{6,1} = zeros(101,232); +merra_euro{7,1} = zeros(101,232); + +ecmwf_euro{1,1} = zeros(101,232); +ecmwf_euro{2,1} = zeros(101,232); +ecmwf_euro{3,1} = zeros(101,232); +ecmwf_euro{4,1} = zeros(101,232); +ecmwf_euro{5,1} = zeros(101,232); +ecmwf_euro{6,1} = zeros(101,232); +ecmwf_euro{7,1} = zeros(101,232); + +% gpcc_euro{1,1} = zeros(101,232); +% gpcc_euro{2,1} = zeros(101,232); +% gpcc_euro{3,1} = zeros(101,232); +% gpcc_euro{4,1} = zeros(101,232); +% gpcc_euro{5,1} = zeros(101,232); +% gpcc_euro{6,1} = zeros(101,232); +% gpcc_euro{7,1} = zeros(101,232); + +eobs_euro{1,1} = zeros(101,232); +eobs_euro{2,1} = zeros(101,232); +eobs_euro{3,1} = zeros(101,232); +eobs_euro{4,1} = zeros(101,232); +eobs_euro{5,1} = zeros(101,232); +eobs_euro{6,1} = zeros(101,232); +eobs_euro{7,1} = zeros(101,232); + +n_days = [366 365 365 365 366 365 365 365]; + +for i = 1:7 + for j = 1:12 + merra_euro{i,1} = merra_euro{i,1} + flipud(merra_t2{j+(i-1)*12,3}(30:130, 280:511)); + ecmwf_euro{i,1} = ecmwf_euro{i,1} + flipud(ecmwf_t2{j+(i-1)*12,3}(30:130, 280:511)); +% gpcc_euro{i,1} = gpcc_euro{i,1} + flipud(gpcc_prec{j+(i-1)*12,8}(30:130, 280:511)); + end + merra_euro{i,1} = merra_euro{i,1}/12-273.15; + ecmwf_euro{i,1} = ecmwf_euro{i,1}/12-273.15; +end + + +for i = 1:7 + for j = 1:n_days(i) + tmp = zeros(101,232); + tmp(eobs_t2{j+(i-1)*n_days(i),9}~=-9999) = eobs_t2{j+(i-1)*n_days(i),9}(eobs_t2{j+(i-1)*n_days(i),9}~=-9999); + eobs_euro{i,1} = eobs_euro{i,1} + tmp; + end + eobs_euro{i,1} = eobs_euro{i,1}/n_days(i)*0.01; +end + + +for i = 1:7 + merra_euro{i,1}(eobs_t2{1,9}==-9999) = -9999; + ecmwf_euro{i,1}(eobs_t2{1,9}==-9999) = -9999; + eobs_euro{i,1}(eobs_t2{1,9}==-9999) = -9999; +% gpcc_euro{i,1}(gpcc_euro{i,1}<0) = 0; +end + + + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, 
eobs_t2{1,7}, eobs_euro{1,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,4,2) +% imagesc(eobs_t2{1,8}, eobs_t2{1,7}, gpcc_euro{1,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% % colormap(precip_cmap); +% % caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{1,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{1,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, eobs_euro{2,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,3,2) +% imagesc(eobs_prec{1,8}, eobs_prec{1,7}, gpcc_euro{2,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{2,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{2,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, eobs_euro{3,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,4,2) +% imagesc(eobs_prec{1,8}, eobs_prec{1,7}, gpcc_euro{3,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{3,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{3,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, eobs_euro{4,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,4,2) +% imagesc(eobs_prec{1,8}, eobs_prec{1,7}, gpcc_euro{4,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{4,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{4,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, eobs_euro{5,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,4,2) +% imagesc(eobs_prec{1,8}, eobs_prec{1,7}, gpcc_euro{5,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% 
colormap(precip_cmap); +% caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{5,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{5,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, eobs_euro{6,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,4,2) +% imagesc(eobs_prec{1,8}, eobs_prec{1,7}, gpcc_euro{6,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{6,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{6,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + +figure +subplot(1,3,1) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, eobs_euro{7,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('E-OBS') +hold off + +% subplot(1,4,2) +% imagesc(eobs_prec{1,8}, eobs_prec{1,7}, gpcc_euro{7,1}) +% axis xy +% axis square +% hold on +% plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +% title('GPCC') +% hold off + +subplot(1,3,2) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, ecmwf_euro{7,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('ECMWF') +hold off + +subplot(1,3,3) +imagesc(eobs_t2{1,8}, eobs_t2{1,7}, merra_euro{7,1}) +axis xy +axis square +hold on +plot(long,lat,'k'); +% colormap(precip_cmap); +% caxis([0 2000]) +title('MERRA') +hold off + + + + + + + + + + + + + + diff --git a/crt_glob_ts.m b/crt_glob_ts.m new file mode 100644 index 0000000..721343c --- /dev/null +++ b/crt_glob_ts.m @@ -0,0 +1,54 @@ +function [mnth_ts ann_ts] = crt_glob_ts(inpt, ts, clms) + + + + + +mnth = cell2mat(inpt(:,clms(1))); +yr = cell2mat(inpt(:,clms(2))); + +sind = find(mnth == 1 & yr == ts(1)); +eind = find(mnth == 12 & yr == ts(2)); + + +inpt = inpt(sind:eind,[clms(1) clms(2) clms(3)]); + + +load /media/storage/Data/LANDSEA/GPCC_LSM.mat +lm_prec = spataggmn(inpt, gpcc_lsm, 1, 'clms', [1 2 3], 'method', 'wmean'); + +mask = gpcc_lsm; +mask(181:end,:) = 0; +nh_prec = spataggmn(inpt, mask, 1, 'clms', [1 2 3], 'method', 'wmean'); + +mask = gpcc_lsm; +mask(1:180,:) = 0; +sh_prec = spataggmn(inpt, mask, 1, 'clms', [1 2 3], 'method', 'wmean'); + +mask = gpcc_lsm; +mask(1:149, :) = 0; % Remove all gridpoints > 15°N +mask(212:end, :) = 0; +tr_prec = spataggmn(inpt, mask, 1, 'clms', [1 2 3], 'method', 'wmean'); + + + +load continents.asc +cont_prec = spataggmn(inpt, continents, [7 6 3 8 9 1], 'clms', [1 2 3], 'method', 'wmean'); + + +mnthly_ts = [lm_prec(2:end, 1:4) nh_prec(2:end,4) sh_prec(2:end,4) tr_prec(2:end,4) cont_prec(2:end, 4:end)]; + +mnth = mnthly_ts(:,1); +yr = mnthly_ts(:,2); + +for i = 1:length(mnth) + nrd = eomday(yr(i), mnth(i)); + mnthly_ts(i,4:end) = mnthly_ts(i,4:end)/nrd; +end + +ann_ts = tsmean(mnthly_ts, 'annual', 'clms', [1 2 4], 'method', 'twmean'); +mnth_ts = tsmean(mnthly_ts, 'monthly', 'clms', 
[1 2 4]); + + + + diff --git a/crt_mnth_wb.m b/crt_mnth_wb.m new file mode 100644 index 0000000..167a28b --- /dev/null +++ b/crt_mnth_wb.m @@ -0,0 +1,186 @@ +function wb = crt_mnth_wb(set, cswitch) + +% A = area_wghts(0.25:0.5:179.75, 0.5); +% A = A'; +% A = A*ones(1,720); + +if cswitch == 1 + if strcmp(set, 'cfsr') + load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat + P_l = comp_glob_quant(cfsr_prec, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% P_o = comp_glob_quant(cfsr_prec, [1989 2006], 2, 'monthly', [4 5 9], -9999, 1); + clear cfsr_prec + + load /media/storage/Data/Evaporation/CFSR/CFSR_ET.mat + ET_l = comp_glob_quant(cfsr_et, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% ET_o = comp_glob_quant(cfsr_et, [1989 2006], 2, 'monthly', [4 5 9], -9999, 1); + clear cfsr_et + + load /media/storage/Data/Runoff/CFSR/CFSR_R.mat + R = comp_glob_quant(cfsr_r, [1989 2006], 1, 'monthly', [4 5 9], 'NaN', 1); + clear cfsr_r + + load /media/storage/Data/Mflux/CFSR/CFSR_VIMFD.mat + DQ_l = comp_glob_quant(cfsr_vimfd, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% DQ_o = comp_glob_quant(cfsr_vimfd, [1989 2006], 2, 'monthly', [4 5 9], -9999, 1); + clear cfsr_vimfd + + load /media/storage/Data/Total_water_atm/CFSR/CFSR_TQV.mat + dwdt = central_diff(cfsr_tqv, [1989, 2006], [4 5 9]); + DW_l = comp_glob_quant(dwdt, [1989 2006], 1, 'monthly', [1 2 3], -9999, 1); +% DW_o = comp_glob_quant(dwdt, [1989 2006], 2, 'monthlyl', [1 2 3], -9999, 1); + clear cfsr_tqv dwdt + +elseif strcmp(set, 'merra') + load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat + P_l = comp_glob_quant(merra_prec, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% P_o = comp_glob_quant(merra_prec, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear merra_prec + + load /media/storage/Data/Evaporation/MERRA/MERRA_ET.mat + ET_l = comp_glob_quant(merra_et, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% ET_o = comp_glob_quant(merra_et, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear merra_et + + load /media/storage/Data/Runoff/MERRA/MERRA_R.mat + R = comp_glob_quant(merra_r, [1989 2006], 1, 'monthly', [4 5 9], 'NaN', 1); + clear merra_r + + load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat + DQ_l = comp_glob_quant(merra_vimfd, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% DQ_o = comp_glob_quant(merra_vimfd, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear merra_vimfd + + load /media/storage/Data/Total_water_atm/MERRA/CFSR_TQV.mat + dwdt = centra_diff(merra_tqv, [1989, 2006], [4 5 9]); + DW_l = comp_glob_quant(dwdt, [1989 2006], 1, 'monthly', [1 2 3], -9999, 1); +% DW_o = comp_glob_quant(dwdt, [1989 2006], 2, 'annual', [1 2 3], -9999, 1); + clear merra_tqv dwdt + +elseif strcmp(set, 'ecmwf') + load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat + P_l = comp_glob_quant(ecmwf_prec, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% P_o = comp_glob_quant(ecmwf_prec, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear ecmwf_prec + + load /media/storage/Data/Evaporation/ECMWF/ECMWF_ET.mat + ET_l = comp_glob_quant(ecmwf_et, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% ET_o = comp_glob_quant(ecmwf_et, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear ecmwf_et + + load /media/storage/Data/Runoff/ECMWF/ECMWF_R.mat + R = comp_glob_quant(ecmwf_r, [1989 2006], 1, 'monthly', [4 5 9], 'NaN', 1); + clear ecmwf_r + + load /media/storage/Data/Mflux/ECMWF/ECMWF_VIMFD.mat + DQ_l = comp_glob_quant(ecmwf_vimfd, [1989 2006], 1, 'monthly', [4 5 9], -9999, 1); +% DQ_o = comp_glob_quant(ecmwf_vimfd, [1989 
2006], 2, 'annual', [4 5 9], -9999, 1); + clear ecmwf_vimfd + + load /media/storage/Data/Total_water_atm/ECMWF/ECMWF_TQV.mat + dwdt = central_diff(ecmwf_tqv, [1989, 2006], [4 5 9]); + DW_l = comp_glob_quant(dwdt, [1989 2006], 1, 'monthly', [1 2 3], -9999, 1); +% DW_o = comp_glob_quant(dwdt, [1989 2006], 2, 'annual', [1 2 3], -9999, 1); + clear ecmwf_tqv dwdt + wb = [P_l' ET_l' R' DQ_l' DW_l']; + end +elseif cswitch == 2 + + if strcmp(set, 'cfsr') + load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat + load /media/storage/Data/Evaporation/CFSR/CFSR_ET.mat + + for i = 1:length(cfsr_et) + emnp{i,1} = cfsr_et{i,4}; + emnp{i,2} = cfsr_et{i,5}; + emnp{i,3} = cfsr_et{i,9} - cfsr_prec{i,9}; + end + + E_P = comp_cont_quant(emnp, [1989 2006], 'monthly', [1 2 3], -9999, 1); + clear cfsr_* emnp + + + load /media/storage/Data/Runoff/CFSR/CFSR_R.mat + R = comp_cont_quant(cfsr_r, [1989 2006], 'monthly', [4 5 9], 'NaN', 1); + clear cfsr_r + + load /media/storage/Data/Mflux/CFSR/CFSR_VIMFD.mat + DQ_l = comp_cont_quant(cfsr_vimfd, [1989 2006],'monthly', [4 5 9], -9999, 1); +% DQ_o = comp_glob_quant(cfsr_vimfd, [1989 2006], 2, 'monthly', [4 5 9], -9999, 1); + clear cfsr_vimfd + + load /media/storage/Data/Total_water_atm/CFSR/CFSR_TQV.mat + dwdt = central_diff(cfsr_tqv, [1989, 2006], [4 5 9]); + DW_l = comp_cont_quant(dwdt, [1989 2006], 'monthly', [1 2 3], -9999, 1); +% DW_o = comp_glob_quant(dwdt, [1989 2006], 2, 'monthlyl', [1 2 3], -9999, 1); + clear cfsr_tqv dwdt + +elseif strcmp(set, 'merra') + load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat + load /media/storage/Data/Evaporation/MERRA/MERRA_ET.mat + + for i = 1:length(merra_et) + emnp{i,1} = merra_et{i,4}; + emnp{i,2} = merra_et{i,5}; + emnp{i,3} = merra_et{i,9} - merra_prec{i,9}; + end + E_P = comp_cont_quant(emnp, [1989 2006], 'monthly', [1 2 3], -9999, 1); + clear merra_* emnp + + + load /media/storage/Data/Runoff/MERRA/MERRA_R.mat + R = comp_cont_quant(merra_r, [1989 2006], 'monthly', [4 5 9], 'NaN', 1); + clear merra_r + + load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat + DQ_l = comp_cont_quant(merra_vimfd, [1989 2006], 'monthly', [4 5 9], -9999, 1); +% DQ_o = comp_glob_quant(merra_vimfd, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear merra_vimfd + + load /media/storage/Data/Total_water_atm/MERRA/CFSR_TQV.mat + dwdt = centra_diff(merra_tqv, [1989, 2006], [4 5 9]); + DW_l = comp_cont_quant(dwdt, [1989 2006], 'monthly', [1 2 3], -9999, 1); +% DW_o = comp_glob_quant(dwdt, [1989 2006], 2, 'annual', [1 2 3], -9999, 1); + clear merra_tqv dwdt + +elseif strcmp(set, 'ecmwf') + + load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat + load /media/storage/Data/Evaporation/ECMWF/ECMWF_ET.mat + + for i = 1:length(ecmwf_et) + emnp{i,1} = ecmwf_et{i,4}; + emnp{i,2} = ecmwf_et{i,5}; + emnp{i,3} = ecmwf_et{i,9} - ecmwf_prec{i,9}; + end + E_P = comp_cont_quant(emnp, [1989 2006], 'monthly', [1 2 3], -9999, 1); + clear ecmwf_* emnp + + load /media/storage/Data/Runoff/ECMWF/ECMWF_R.mat + R = comp_cont_quant(ecmwf_r, [1989 2006], 'monthly', [4 5 9], 'NaN', 1); + clear ecmwf_r + + load /media/storage/Data/Mflux/ECMWF/ECMWF_VIMFD.mat + DQ_l = comp_cont_quant(ecmwf_vimfd, [1989 2006], 'monthly', [4 5 9], -9999, 1); +% DQ_o = comp_glob_quant(ecmwf_vimfd, [1989 2006], 2, 'annual', [4 5 9], -9999, 1); + clear ecmwf_vimfd + + load /media/storage/Data/Total_water_atm/ECMWF/ECMWF_TQV.mat + dwdt = central_diff(ecmwf_tqv, [1989, 2006], [4 5 9]); + DW_l = comp_cont_quant(dwdt, [1989 2006], 'monthly', [1 2 3], -9999, 1); +% DW_o = 
comp_glob_quant(dwdt, [1989 2006], 2, 'annual', [1 2 3], -9999, 1); + clear ecmwf_tqv dwdt + end + + wb{1} = E_P; + wb{2} = R; + wb{3} = DQ_l; + wb{4} = DW_l; + +end + + + + + + diff --git a/crt_mnthly_taylor.m b/crt_mnthly_taylor.m new file mode 100644 index 0000000..35eae8f --- /dev/null +++ b/crt_mnthly_taylor.m @@ -0,0 +1,151 @@ +% R_ja = []; +% R_ju = []; +% E_ja = []; +% E_ju = []; +% S_ja = []; +% S_ju = []; +% +% load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 1, gpcc_prec, gpcc_prec); +% +% sprintf('GPCC done') +% +% load /media/storage/Data/Precipitation/GPCP/GPCP_PRECv2.1.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 2, gpcc_prec, gpcp_prec); +% clear gpcp_prec +% sprintf('GPCP done') +% +% load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 3, gpcc_prec, cru_prec); +% clear cru_prec +% sprintf('CRU done') +% +% load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 4, gpcc_prec, cpc_prec); +% clear cpc_prec +% sprintf('CPC done') +% +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 5, gpcc_prec, ecmwf_prec); +% clear ecmwf_prec +% sprintf('ECMWF done') +% +% +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 6, gpcc_prec, merra_prec); +% clear merra_prec +% sprintf('MERRA done') +% +% +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +% [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, 7, gpcc_prec, cfsr_prec); +% clear cfsr_prec +% clear gpcc_prec +% sprintf('CFSR done') +% % +% % keyboard +% save taylor_stats.mat +clear all +close all +load taylor_stats.mat + +for i = 1:10 + E_ja{i} = E_ja{i}./(S_ja{i}(:,1)*ones(1,7)); + S_ja{i} = S_ja{i}./(S_ja{i}(:,1)*ones(1,7)); + + E_ju{i} = E_ju{i}./(S_ju{i}(:,1)*ones(1,7)); + S_ju{i} = S_ju{i}./(S_ju{i}(:,1)*ones(1,7)); +end + + + +fname_ja{1} = 'tlr_glb_ja.eps'; +fname_ja{2} = 'tlr_nh_ja.eps'; +fname_ja{3} = 'tlr_sh_ja.eps'; +fname_ja{4} = 'tlr_tr_ja.eps'; +fname_ja{5} = 'tlr_na_ja.eps'; +fname_ja{6} = 'tlr_sa_ja.eps'; +fname_ja{7} = 'tlr_e_ja.eps'; +fname_ja{8} = 'tlr_af_ja.eps'; +fname_ja{9} = 'tlr_as_ja.eps'; +fname_ja{10} = 'tlr_au_ja.eps' + + + +fname_ju{1} = 'tlr_glb_ju.eps'; +fname_ju{2} = 'tlr_nh_ju.eps'; +fname_ju{3} = 'tlr_sh_ju.eps'; +fname_ju{4} = 'tlr_tr_ju.eps'; +fname_ju{5} = 'tlr_na_ju.eps'; +fname_ju{6} = 'tlr_sa_ju.eps'; +fname_ju{7} = 'tlr_e_ju.eps'; +fname_ju{8} = 'tlr_af_ju.eps'; +fname_ju{9} = 'tlr_as_ju.eps'; +fname_ju{10} = 'tlr_au_ju.eps'; + +tle_ja{1} = 'Global land January'; +tle_ja{2} = 'Northern hemisphere January'; +tle_ja{3} = 'Southern hemisphere January'; +tle_ja{4} = '15S - 15N January'; +tle_ja{5} = 'North America January'; +tle_ja{6} = 'South America January'; +tle_ja{7} = 'Europe January'; +tle_ja{8} = 'Africa January'; +tle_ja{9} = 'Asia January'; +tle_ja{10} = 'Australia January'; + +tle_ju{1} = 'Global land July'; +tle_ju{2} = 'Northern hemisphere July'; +tle_ju{3} = 'Southern hemisphere July'; +tle_ju{4} = '15S - 15N July'; +tle_ju{5} = 'North 
America July'; +tle_ju{6} = 'South America July'; +tle_ju{7} = 'Europe July'; +tle_ju{8} = 'Africa July'; +tle_ju{9} = 'Asia July'; +tle_ju{10} = 'Australia July'; + + +%% +for i = 1:10 + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + axis([0 1 0 2]); + + [hp ht axl] = taylordiag_new(S_ja{1,i}, E_ja{1,i}, R_ja{1,i}, ... + 'labelDTA', 0, 'tickrms', 0:0.25:1.25, 'titleSTD', 1, 'showlabelsRMS', 1); + if i == 1 + grr = legend(hp(1,:), 'GPCC', 'GPCP', 'CRU', 'CPC', 'INTERIM', 'MERRA', 'CFSR'); + set(grr, 'fontsize', 20,'fontweight','bold') + set(grr, 'location', 'Best') + keyboard + end +% xlabel(tle_ja{i}, 'fontsize', 28) +% set(get(axl(1,1).handle, 'Ylabel'), 'standard deviation (normalized)', 'fontsize', 14, 'Position', [1 2 3]) + print(fname_ja{i}, '-depsc2') + close all + + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + axis([0 1 0 2]); + [hp ht axl] = taylordiag_new(S_ju{1,i}, E_ju{1,i}, R_ju{1,i}, ... + 'labelDTA', 0, 'tickRMS', 0:0.25:1.25, 'titleSTD', 1, 'showlabelsRMS', 1); +% xlabel(tle_ju{i}, 'fontsize', 28) +% ylabel('standard deviation (normalized)', 'fontsize', 14) + print(fname_ju{i}, '-depsc2') + close all + +end + + + + + + + + + + + + + + + diff --git a/crt_prec_ts_gmt.m b/crt_prec_ts_gmt.m new file mode 100644 index 0000000..9c123bf --- /dev/null +++ b/crt_prec_ts_gmt.m @@ -0,0 +1,248 @@ +load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +gpcc_glob_ann = comp_glob_quant(gpcc_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +gpcc_glob_mon = comp_glob_quant(gpcc_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); + +gpcc_nhsh_ann = comp_glob_quant(gpcc_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +gpcc_nhsh_mon = comp_glob_quant(gpcc_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); + +gpcc_trpc_ann = comp_glob_quant(gpcc_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +gpcc_trpc_mon = comp_glob_quant(gpcc_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); + +gpcc_cont_ann = comp_cont_quant(gpcc_prec, [1989 2006], 'annual', [7 6 3 8 9 1], [4 5 9], -9999, 1); +gpcc_cont_mon = comp_cont_quant(gpcc_prec, [1989 2006], 'monthly', [7 6 3 8 9 1], [4 5 9], -9999, 1); +clear gpcc_prec +% +% +% gpcc_ann(:,1) = gpcc_glob_ann(:,2); +% gpcc_ann(:,2) = gpcc_nhsh_ann(:,2); +% gpcc_ann(:,3) = gpcc_nhsh_ann(:,3); +% gpcc_ann(:,4) = gpcc_cont_ann(:,2); +% gpcc_ann(:,5) = gpcc_cont_ann(:,3); +% gpcc_ann(:,6) = gpcc_cont_ann(:,4); +% gpcc_ann(:,7) = gpcc_cont_ann(:,5); +% gpcc_ann(:,8) = gpcc_cont_ann(:,6); +% gpcc_ann(:,9) = gpcc_cont_ann(:,7); +% gpcc_ann(:,10) = gpcc_trpc_ann(:,2); +% +% gpcc_mon(:,1) = gpcc_glob_mon(:,2); +% gpcc_mon(:,2) = gpcc_nhsh_mon(:,2); +% gpcc_mon(:,3) = gpcc_nhsh_mon(:,3); +% gpcc_mon(:,4) = gpcc_cont_mon(:,2); +% gpcc_mon(:,5) = gpcc_cont_mon(:,3); +% gpcc_mon(:,6) = gpcc_cont_mon(:,4); +% gpcc_mon(:,7) = gpcc_cont_mon(:,5); +% gpcc_mon(:,8) = gpcc_cont_mon(:,6); +% gpcc_mon(:,9) = gpcc_cont_mon(:,7); +% gpcc_mon(:,10) = gpcc_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/GPCP/GPCP_PRECv2.1.mat +% gpcp_glob_ann = comp_glob_quant(gpcp_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% gpcp_glob_mon = comp_glob_quant(gpcp_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% gpcp_nhsh_ann = comp_glob_quant(gpcp_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% gpcp_nhsh_mon = comp_glob_quant(gpcp_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% gpcp_trpc_ann = comp_glob_quant(gpcp_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% 
gpcp_trpc_mon = comp_glob_quant(gpcp_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% gpcp_cont_ann = comp_cont_quant(gpcp_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% gpcp_cont_mon = comp_cont_quant(gpcp_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear gpcp_prec +% +% gpcp_ann(:,1) = gpcp_glob_ann(:,2); +% gpcp_ann(:,2) = gpcp_nhsh_ann(:,2); +% gpcp_ann(:,3) = gpcp_nhsh_ann(:,3); +% gpcp_ann(:,4) = gpcp_cont_ann(:,2); +% gpcp_ann(:,5) = gpcp_cont_ann(:,3); +% gpcp_ann(:,6) = gpcp_cont_ann(:,4); +% gpcp_ann(:,7) = gpcp_cont_ann(:,5); +% gpcp_ann(:,8) = gpcp_cont_ann(:,6); +% gpcp_ann(:,9) = gpcp_cont_ann(:,7); +% gpcp_ann(:,10) = gpcp_trpc_ann(:,2); +% +% gpcp_mon(:,1) = gpcp_glob_mon(:,2); +% gpcp_mon(:,2) = gpcp_nhsh_mon(:,2); +% gpcp_mon(:,3) = gpcp_nhsh_mon(:,3); +% gpcp_mon(:,4) = gpcp_cont_mon(:,2); +% gpcp_mon(:,5) = gpcp_cont_mon(:,3); +% gpcp_mon(:,6) = gpcp_cont_mon(:,4); +% gpcp_mon(:,7) = gpcp_cont_mon(:,5); +% gpcp_mon(:,8) = gpcp_cont_mon(:,6); +% gpcp_mon(:,9) = gpcp_cont_mon(:,7); +% gpcp_mon(:,10) = gpcp_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% +% +% load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +% cru_glob_ann = comp_glob_quant(cru_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% cru_glob_mon = comp_glob_quant(cru_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% cru_nhsh_ann = comp_glob_quant(cru_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% cru_nhsh_mon = comp_glob_quant(cru_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% cru_trpc_ann = comp_glob_quant(cru_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% cru_trpc_mon = comp_glob_quant(cru_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% cru_cont_ann = comp_cont_quant(cru_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% cru_cont_mon = comp_cont_quant(cru_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear cru_prec +% +% cru_ann(:,1) = cru_glob_ann(:,2); +% cru_ann(:,2) = cru_nhsh_ann(:,2); +% cru_ann(:,3) = cru_nhsh_ann(:,3); +% cru_ann(:,4) = cru_cont_ann(:,2); +% cru_ann(:,5) = cru_cont_ann(:,3); +% cru_ann(:,6) = cru_cont_ann(:,4); +% cru_ann(:,7) = cru_cont_ann(:,5); +% cru_ann(:,8) = cru_cont_ann(:,6); +% cru_ann(:,9) = cru_cont_ann(:,7); +% cru_ann(:,10) = cru_trpc_ann(:,2); +% +% cru_mon(:,1) = cru_glob_mon(:,2); +% cru_mon(:,2) = cru_nhsh_mon(:,2); +% cru_mon(:,3) = cru_nhsh_mon(:,3); +% cru_mon(:,4) = cru_cont_mon(:,2); +% cru_mon(:,5) = cru_cont_mon(:,3); +% cru_mon(:,6) = cru_cont_mon(:,4); +% cru_mon(:,7) = cru_cont_mon(:,5); +% cru_mon(:,8) = cru_cont_mon(:,6); +% cru_mon(:,9) = cru_cont_mon(:,7); +% cru_mon(:,10) = cru_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% +% load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +% cpc_glob_ann = comp_glob_quant(cpc_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% cpc_glob_mon = comp_glob_quant(cpc_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% cpc_nhsh_ann = comp_glob_quant(cpc_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% cpc_nhsh_mon = comp_glob_quant(cpc_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% cpc_trpc_ann = comp_glob_quant(cpc_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% cpc_trpc_mon = comp_glob_quant(cpc_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% cpc_cont_ann = comp_cont_quant(cpc_prec, [1989 2006], 'annual',[7 6 3 8 9 1], [4 5 9], -9999, 1); +% cpc_cont_mon = comp_cont_quant(cpc_prec, [1989 2006], 'monthly', [7 6 3 8 9 
1],[4 5 9], -9999, 1); +% clear cpc_prec +% +% +% cpc_ann(:,1) = cpc_glob_ann(:,2); +% cpc_ann(:,2) = cpc_nhsh_ann(:,2); +% cpc_ann(:,3) = cpc_nhsh_ann(:,3); +% cpc_ann(:,4) = cpc_cont_ann(:,2); +% cpc_ann(:,5) = cpc_cont_ann(:,3); +% cpc_ann(:,6) = cpc_cont_ann(:,4); +% cpc_ann(:,7) = cpc_cont_ann(:,5); +% cpc_ann(:,8) = cpc_cont_ann(:,6); +% cpc_ann(:,9) = cpc_cont_ann(:,7); +% cpc_ann(:,10) = cpc_trpc_ann(:,2); +% +% cpc_mon(:,1) = cpc_glob_mon(:,2); +% cpc_mon(:,2) = cpc_nhsh_mon(:,2); +% cpc_mon(:,3) = cpc_nhsh_mon(:,3); +% cpc_mon(:,4) = cpc_cont_mon(:,2); +% cpc_mon(:,5) = cpc_cont_mon(:,3); +% cpc_mon(:,6) = cpc_cont_mon(:,4); +% cpc_mon(:,7) = cpc_cont_mon(:,5); +% cpc_mon(:,8) = cpc_cont_mon(:,6); +% cpc_mon(:,9) = cpc_cont_mon(:,7); +% cpc_mon(:,10) = cpc_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% ecmwf_glob_ann = comp_glob_quant(ecmwf_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% ecmwf_glob_mon = comp_glob_quant(ecmwf_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% ecmwf_nhsh_ann = comp_glob_quant(ecmwf_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% ecmwf_nhsh_mon = comp_glob_quant(ecmwf_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% ecmwf_trpc_ann = comp_glob_quant(ecmwf_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% ecmwf_trpc_mon = comp_glob_quant(ecmwf_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% ecmwf_cont_ann = comp_cont_quant(ecmwf_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% ecmwf_cont_mon = comp_cont_quant(ecmwf_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear ecmwf_prec +% +% +% ecmwf_ann(:,1) = ecmwf_glob_ann(:,2); +% ecmwf_ann(:,2) = ecmwf_nhsh_ann(:,2); +% ecmwf_ann(:,3) = ecmwf_nhsh_ann(:,3); +% ecmwf_ann(:,4) = ecmwf_cont_ann(:,2); +% ecmwf_ann(:,5) = ecmwf_cont_ann(:,3); +% ecmwf_ann(:,6) = ecmwf_cont_ann(:,4); +% ecmwf_ann(:,7) = ecmwf_cont_ann(:,5); +% ecmwf_ann(:,8) = ecmwf_cont_ann(:,6); +% ecmwf_ann(:,9) = ecmwf_cont_ann(:,7); +% ecmwf_ann(:,10) = ecmwf_trpc_ann(:,2); +% +% ecmwf_mon(:,1) = ecmwf_glob_mon(:,2); +% ecmwf_mon(:,2) = ecmwf_nhsh_mon(:,2); +% ecmwf_mon(:,3) = ecmwf_nhsh_mon(:,3); +% ecmwf_mon(:,4) = ecmwf_cont_mon(:,2); +% ecmwf_mon(:,5) = ecmwf_cont_mon(:,3); +% ecmwf_mon(:,6) = ecmwf_cont_mon(:,4); +% ecmwf_mon(:,7) = ecmwf_cont_mon(:,5); +% ecmwf_mon(:,8) = ecmwf_cont_mon(:,6); +% ecmwf_mon(:,9) = ecmwf_cont_mon(:,7); +% ecmwf_mon(:,10) = ecmwf_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +% merra_glob_ann = comp_glob_quant(merra_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% merra_glob_mon = comp_glob_quant(merra_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% merra_nhsh_ann = comp_glob_quant(merra_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% merra_nhsh_mon = comp_glob_quant(merra_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% merra_trpc_ann = comp_glob_quant(merra_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% merra_trpc_mon = comp_glob_quant(merra_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% merra_cont_ann = comp_cont_quant(merra_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% merra_cont_mon = comp_cont_quant(merra_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% clear merra_prec +% +% +% merra_ann(:,1) = merra_glob_ann(:,2); +% merra_ann(:,2) = merra_nhsh_ann(:,2); +% merra_ann(:,3) = merra_nhsh_ann(:,3); +% 
merra_ann(:,4) = merra_cont_ann(:,2); +% merra_ann(:,5) = merra_cont_ann(:,3); +% merra_ann(:,6) = merra_cont_ann(:,4); +% merra_ann(:,7) = merra_cont_ann(:,5); +% merra_ann(:,8) = merra_cont_ann(:,6); +% merra_ann(:,9) = merra_cont_ann(:,7); +% merra_ann(:,10) = merra_trpc_ann(:,2); +% +% merra_mon(:,1) = merra_glob_mon(:,2); +% merra_mon(:,2) = merra_nhsh_mon(:,2); +% merra_mon(:,3) = merra_nhsh_mon(:,3); +% merra_mon(:,4) = merra_cont_mon(:,2); +% merra_mon(:,5) = merra_cont_mon(:,3); +% merra_mon(:,6) = merra_cont_mon(:,4); +% merra_mon(:,7) = merra_cont_mon(:,5); +% merra_mon(:,8) = merra_cont_mon(:,6); +% merra_mon(:,9) = merra_cont_mon(:,7); +% merra_mon(:,10) = merra_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +% cfsr_glob_ann = comp_glob_quant(cfsr_prec, [1989 2006],1, 'annual', [4 5 9], -9999, 1); +% cfsr_glob_mon = comp_glob_quant(cfsr_prec, [1989 2006],1, 'monthly', [4 5 9], -9999, 1); +% +% cfsr_nhsh_ann = comp_glob_quant(cfsr_prec, [1989 2006],4, 'annual', [4 5 9], -9999, 1); +% cfsr_nhsh_mon = comp_glob_quant(cfsr_prec, [1989 2006],4, 'monthly', [4 5 9], -9999, 1); +% +% cfsr_trpc_ann = comp_glob_quant(cfsr_prec, [1989 2006],5, 'annual', [4 5 9], -9999, 1); +% cfsr_trpc_mon = comp_glob_quant(cfsr_prec, [1989 2006],5, 'monthly', [4 5 9], -9999, 1); +% +% cfsr_cont_ann = comp_cont_quant(cfsr_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999, 1); +% cfsr_cont_mon = comp_cont_quant(cfsr_prec, [1989 2006], 'monthly',[7 6 3 +% 8 9 1], [4 5 9], -9999, 1); \ No newline at end of file diff --git a/crt_seasonal_taylor.m b/crt_seasonal_taylor.m new file mode 100644 index 0000000..af71e35 --- /dev/null +++ b/crt_seasonal_taylor.m @@ -0,0 +1,72 @@ +load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +P_gpcc = comp_spat_mean(gpcc_prec, [1989 2006], 'monthly_2', [4,5,9], -9999,1); +clear gpcc* +sprintf('EIER') +load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +P_cru = comp_spat_mean(cru_prec, [1989 2006], 'monthly_2', [4,5,9], -9999,1); +clear cru* + +load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +P_cpc = comp_spat_mean(cpc_prec, [1989 2006], 'monthly_2', [4,5,9], -9999,1); +clear cpc* + +load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +P_ecmwf = comp_spat_mean(ecmwf_prec, [1989 2006], 'monthly_2', [4,5,9], -9999,1); +clear ecmwf* + +load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +P_merra = comp_spat_mean(merra_prec, [1989 2006], 'monthly_2', [4,5,9], -9999,1); +clear merra* + +load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +P_cfsr = comp_spat_mean(cfsr_prec, [1989 2006], 'monthly_2', [4,5,9], -9999,1); +clear cfsr* + + +mnth(1) = 1; +mnth(2) = 7; + + +for k = 1:2 +for i = 1:18 + + [Rt Et sigt] = taylor_stats_2d(P_gpcc{i,mnth(k)}, 3, -9999, P_cru{i,mnth(k)}, ... + P_cpc{i,mnth(k)}, P_ecmwf{i,mnth(k)}, P_merra{i,mnth(k)}, P_cfsr{i,mnth(k)}); + if i == 1 + for j = 1:11 + R{k,j} = Rt(j,:); + sig{k,j} = sigt(j,:); + E{k,j} = Et(j,:); + end + else + for j = 1:11 + R{k,j} = [R{k,j}; Rt(j,:)]; + E{k,j} = [E{k,j}; Et(j,:)]; + sig{k,j} = [sig{k,j}; sigt(j,:)]; + + end + end +end +end + +for j = 1:2 + for i = 1:11 + E_n{j,i} = E{j,i}./(sig{j,i}(:,1)*ones(1,6)); + sig_n{j,i} = sig{j,i}./(sig{j,i}(:,1)*ones(1,6)); + end +end + + +% for j = 1:4 +% for i = 1:11 +% mn_s{j}(i,:) = mean(sig{j,i},1); +% mn_R{j}(i,:) = mean(R{j,i},1); +% mn_E{j}(i,:) = (mn_s{j}(i,:).^2 + mn_s{j}(i,1).^2 - ... 
+% 2*mn_s{j}(i,:).*mn_s{j}(i,1).*mn_R{j}(i,:)).^(1/2); +% +% end +% mn_E{j} = mn_E{j}./(mn_s{j}(:,1)*ones(1,6)); +% mn_s{j} = mn_s{j}./(mn_s{j}(:,1)*ones(1,6)); +% end + + diff --git a/crt_seasonal_ts.m b/crt_seasonal_ts.m new file mode 100644 index 0000000..be3cb70 --- /dev/null +++ b/crt_seasonal_ts.m @@ -0,0 +1,578 @@ +clear all + + +% Ref 1 +load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +gpcc_mean_ann = spat_mean(gpcc_prec, [1989 2006], 'annual', -9999); +gpcc_mean_snl = spat_mean(gpcc_prec, [1990 2006], 'seasonal', -9999); + +longi = gpcc_prec{1,8}; +lati = gpcc_prec{1,7}; + +clear gpcc_prec + +% Ref 2 +load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +cpc_mean_ann = spat_mean(cpc_prec, [1989 2006], 'annual', -9999); +cpc_mean_snl = spat_mean(cpc_prec, [1990 2006], 'seasonal', -9999); +clear cpc_prec + +% Reana 1 +load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +ecmwf_mean_ann = spat_mean(ecmwf_prec, [1989 2006], 'annual', -9999); +ecmwf_mean_snl = spat_mean(ecmwf_prec, [1990 2006], 'seasonal', -9999); +clear ecmwf_prec + +% Reana 2 +load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +merra_mean_ann = spat_mean(merra_prec, [1989 2006], 'annual', -9999); +merra_mean_snl = spat_mean(merra_prec, [1990 2006], 'seasonal', -9999); +clear merra_prec + +% Reana 3 +load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +cfsr_mean_ann = spat_mean(cfsr_prec, [1989 2006], 'annual', -9999); +cfsr_mean_snl = spat_mean(cfsr_prec, [1990 2006], 'seasonal', -9999); +clear cfsr_prec +% +for i = 1:4 + % Removing the missing values from GPCC in all datasets + ecmwf_mean_snl{i}(gpcc_mean_snl{i} == -9999) = NaN; + merra_mean_snl{i}(gpcc_mean_snl{i} == -9999) = NaN; + cfsr_mean_snl{i}(gpcc_mean_snl{i} == -9999) = NaN; + cpc_mean_snl{i}(gpcc_mean_snl{i} == -9999) = NaN; + gpcc_mean_snl{i}(gpcc_mean_snl{i} == -9999) = NaN; + % Removing the missing values from CPC in all datasets + gpcc_mean_snl{i}(cpc_mean_snl{i} == -9999) = NaN; + ecmwf_mean_snl{i}(cpc_mean_snl{i} == -9999) = NaN; + merra_mean_snl{i}(cpc_mean_snl{i} == -9999) = NaN; + cfsr_mean_snl{i}(cpc_mean_snl{i} == -9999) = NaN; +end + + +ecmwf_mean_ann(gpcc_mean_ann == -9999) = NaN; +merra_mean_ann(gpcc_mean_ann == -9999) = NaN; +cfsr_mean_ann(gpcc_mean_ann == -9999) = NaN; +cpc_mean_ann(gpcc_mean_ann == -9999) = NaN; +gpcc_mean_ann(gpcc_mean_ann == -9999) = NaN; + +gpcc_mean_ann(cpc_mean_ann == -9999) = NaN; +ecmwf_mean_ann(cpc_mean_ann == -9999) = NaN; +merra_mean_ann(cpc_mean_ann == -9999) = NaN; +cfsr_mean_ann(cpc_mean_ann == -9999) = NaN; + +mask= zeros(360,720); +mask(gpcc_mean_ann == 0) = 1; +% Setting values equal zero to one to compute the relative differences +cpc_mean_ann(mask == 1) = cpc_mean_ann(mask == 1) + 1; +ecmwf_mean_ann(mask == 1) = ecmwf_mean_ann(mask == 1) + 1; +merra_mean_ann(mask == 1) = merra_mean_ann(mask == 1) + 1; +cfsr_mean_ann(mask == 1) = cfsr_mean_ann(mask == 1) + 1; +gpcc_mean_ann(mask == 1) = 1; +% Computing the Differencess +cpc_diff_ann = cpc_mean_ann - gpcc_mean_ann; +ecmwf_diff_ann = ecmwf_mean_ann - gpcc_mean_ann; +merra_diff_ann = merra_mean_ann - gpcc_mean_ann; +cfsr_diff_ann = cfsr_mean_ann - gpcc_mean_ann; + + +cpc_rl_df_ann = cpc_diff_ann./gpcc_mean_ann*100; +ecmwf_rl_df_ann = ecmwf_diff_ann./gpcc_mean_ann*100; +merra_rl_df_ann = merra_diff_ann./gpcc_mean_ann*100; +cfsr_rl_df_ann = cfsr_diff_ann./gpcc_mean_ann*100; + +cpc_diff_ann(isnan(gpcc_mean_ann)) = NaN; +ecmwf_diff_ann(isnan(gpcc_mean_ann)) = NaN; +merra_diff_ann(isnan(gpcc_mean_ann)) = NaN; 
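% The +1/-1 shift applied above only guards the relative differences against
% division by zero where the GPCC reference field is exactly zero. A minimal
% sketch of the same idea as a stand-alone helper (the name rel_diff_pct and
% its argument order are hypothetical, not part of this toolbox):
%
%   function rd = rel_diff_pct(test, ref)
%   zmask        = (ref == 0);            % cells with a zero reference value
%   ref(zmask)   = ref(zmask)  + 1;       % shift both fields by one ...
%   test(zmask)  = test(zmask) + 1;       % ... so the difference is unchanged
%   rd           = (test - ref)./ref*100; % relative difference in percent
%   rd(isnan(ref)) = NaN;                 % keep missing values missing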
+cfsr_diff_ann(isnan(gpcc_mean_ann)) = NaN; + +cpc_rl_df_ann(isnan(gpcc_mean_ann)) = NaN; +ecmwf_rl_df_ann(isnan(gpcc_mean_ann)) = NaN; +merra_rl_df_ann(isnan(gpcc_mean_ann)) = NaN; +cfsr_rl_df_ann(isnan(gpcc_mean_ann)) = NaN; + + +cpc_mean_ann(mask == 1) = cpc_mean_ann(mask == 1) - 1; +ecmwf_mean_ann(mask == 1) = ecmwf_mean_ann(mask == 1) - 1; +merra_mean_ann(mask == 1) = merra_mean_ann(mask == 1) - 1; +cfsr_mean_ann(mask == 1) = cfsr_mean_ann(mask == 1) - 1; +gpcc_mean_ann(mask == 1) = 0; + +for i = 1:4 + + mask(360,720); + mask(gpcc_mean_snl{i} == 0) = 1; + % Setting values equal zero to one to compute the relative differences + cpc_mean_snl{i}(mask == 1) = cpc_mean_snl{i}(mask == 1) + 1; + ecmwf_mean_snl{i}(mask == 1) = ecmwf_mean_snl{i}(mask == 1) + 1; + merra_mean_snl{i}(mask == 1) = merra_mean_snl{i}(mask == 1) + 1; + cfsr_mean_snl{i}(mask == 1) = cfsr_mean_snl{i}(mask == 1) + 1; + gpcc_mean_snl{i}(mask == 1) = 1; + + cpc_diff_snl{i} = cpc_mean_snl{i} - gpcc_mean_snl{i}; + ecmwf_diff_snl{i} = ecmwf_mean_snl{i} - gpcc_mean_snl{i}; + merra_diff_snl{i} = merra_mean_snl{i} - gpcc_mean_snl{i}; + cfsr_diff_snl{i} = cfsr_mean_snl{i} - gpcc_mean_snl{i}; + + cpc_rl_df_snl{i} = cpc_diff_snl{i}./gpcc_mean_snl{i}*100; + ecmwf_rl_df_snl{i} = ecmwf_diff_snl{i}./gpcc_mean_snl{i}*100; + merra_rl_df_snl{i} = merra_diff_snl{i}./gpcc_mean_snl{i}*100; + cfsr_rl_df_snl{i} = cfsr_diff_snl{i}./gpcc_mean_snl{i}*100; + + + + cpc_diff_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + ecmwf_diff_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + merra_diff_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + cfsr_diff_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + + cpc_rl_df_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + ecmwf_rl_df_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + merra_rl_df_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + cfsr_rl_df_snl{i}(isnan(gpcc_mean_snl{i})) = NaN; + + % Subtracting the previously added one + cpc_mean_snl{i}(mask == 1) = cpc_mean_snl{i}(mask == 1) - 1; + ecmwf_mean_snl{i}(mask == 1) = ecmwf_mean_snl{i}(mask == 1) - 1; + merra_mean_snl{i}(mask == 1) = merra_mean_snl{i}(mask == 1) - 1; + cfsr_mean_snl{i}(mask == 1) = cfsr_mean_snl{i}(mask == 1) - 1; + gpcc_mean_snl{i}(mask == 1) = 0; + +end + + +% Transforming the matices into the GMT-Format +sns{1} = 'DJF'; +sns{2} = 'MAM'; +sns{3} = 'JJA'; +sns{4} = 'SON'; + +for i = 1:4 + fname = ['Absolute/gpcc_', sns{i}, '.txt']; + tmp = grid2gmt(gpcc_mean_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Absolute/cpc_', sns{i}, '.txt']; + tmp = grid2gmt(cpc_mean_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Differences/d_cpc_', sns{i}, '.txt']; + tmp = grid2gmt(cpc_diff_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Relative/d_rl_cpc_', sns{i}, '.txt']; + tmp = grid2gmt(cpc_rl_df_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Absolute/ecmwf_', sns{i}, '.txt']; + tmp = grid2gmt(ecmwf_mean_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Differences/d_ecmwf_', sns{i}, '.txt']; + tmp = grid2gmt(ecmwf_diff_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Relative/d_rl_ecmwf_', sns{i}, '.txt']; + tmp = grid2gmt(ecmwf_rl_df_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Absolute/merra_', sns{i}, '.txt']; + tmp = grid2gmt(merra_mean_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Differences/d_merra_', sns{i}, '.txt']; + tmp = grid2gmt(merra_diff_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Relative/d_rl_merra_', sns{i}, '.txt']; + tmp = grid2gmt(merra_rl_df_snl{i}, 0.5); + 
save(fname, 'tmp', '-ASCII'); + + fname = ['Absolute/cfsr_', sns{i}, '.txt']; + tmp = grid2gmt(cfsr_mean_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Differences/d_cfsr_', sns{i}, '.txt']; + tmp = grid2gmt(cfsr_diff_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); + + fname = ['Relative/d_rl_cfsr_', sns{i}, '.txt']; + tmp = grid2gmt(cfsr_rl_df_snl{i}, 0.5); + save(fname, 'tmp', '-ASCII'); +end + +fname = ['Absolute/gpcc_ann.txt']; +tmp = grid2gmt(gpcc_mean_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Absolute/cpc_ann.txt']; +tmp = grid2gmt(cpc_mean_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Differences/d_cpc_ann.txt']; +tmp = grid2gmt(cpc_diff_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Relative/d_rl_cpc_ann.txt']; +tmp = grid2gmt(cpc_rl_df_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Absolute/ecmwf_ann.txt']; +tmp = grid2gmt(ecmwf_mean_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Differences/d_ecmwf_ann.txt']; +tmp = grid2gmt(ecmwf_diff_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Relative/d_rl_ecmwf_ann.txt']; +tmp = grid2gmt(ecmwf_rl_df_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Absolute/merra_ann.txt']; +tmp = grid2gmt(merra_mean_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Differences/d_merra_ann.txt']; +tmp = grid2gmt(merra_diff_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Relative/d_rl_merra_ann.txt']; +tmp = grid2gmt(merra_rl_df_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Absolute/cfsr_ann.txt']; +tmp = grid2gmt(cfsr_mean_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Differences/d_cfsr_ann.txt']; +tmp = grid2gmt(cfsr_diff_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + +fname = ['Relative/d_rl_cfsr_ann.txt']; +tmp = grid2gmt(cfsr_rl_df_ann, 0.5); +save(fname, 'tmp', '-ASCII'); + + + + + + + + + + + + + + + + + + + + + + +% + +% gpcc_mean = spat_mean(gpcc_prec, [1990 1991], 'seasonal', -9999); + + + +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% +% ecmwf_mean = spat_mean(ecmwf_prec, [1981 1995], 'annual', -9999); +% +% clear ecmwf_prec* + +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC2.mat +% +% merra_mean = spat_mean(merra_prec, [1990 1991], 'annual', -9999); + + + +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat + +% cfsr_mean = spat_mean(cfsr_prec, [1990 1991], 'seasonal', -9999); + + + + +% % ecmwf_mean(gpcc_mean == -9999) = -9999; +% for i = 1:4 +% cfsr_mean{i} = cfsr_mean{i}/2; +% cfsr_mean{i}(gpcc_mean{i} == -9999) = -9999; +% % merra_mean(gpcc_mean == -9999) = -9999; +% end + + + +% ecmwf_rel = abs(ecmwf_mean - gpcc_mean)./gpcc_mean; +% merra_rel = abs(merra_mean - gpcc_mean)./gpcc_mean; +% cfsr_rel = abs(cfsr_mean - gpcc_mean)./gpcc_mean; + +% +% ecmwf_rel(gpcc_mean == -9999) = -9999; +% cfsr_rel(gpcc_mean == -9999) = -9999; +% merra_rel(gpcc_mean == -9999) = -9999; + +% load coast +% load /home/lorenz-c/Data/colormaps/precip_cmap.mat +% load /home/lorenz-c/Data/colormaps/rel_precip_cmap.mat + +% x4 = figure +% set(x4, 'papertype', 'a4') +% set(x4,'paperposition',[0.5,0.5,20,28]) +% +% subplot(2,2,1) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, cfsr_mean{1}-gpcc_mean{1}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-5 5]) +% title('GPCC') +% colormap(rel_precip_cmap) +% +% subplot(2,2,2) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, cfsr_mean{2}-gpcc_mean{2}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-5 5]) +% title('GPCC') +% 
colormap(rel_precip_cmap) +% +% subplot(2,2,3) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, cfsr_mean{3}-gpcc_mean{3}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-5 5]) +% title('GPCC') +% colormap(rel_precip_cmap) +% +% subplot(2,2,4) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, cfsr_mean{4}-gpcc_mean{4}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-5 5]) +% title('GPCC') +% colormap(rel_precip_cmap) +% % +% % subplot(4,1,2) +% % imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, ecmwf_mean); +% % axis xy +% % pbaspect([2 1 1]) +% % hold on +% % plot(long, lat, 'k'); +% % caxis([-1 12]) +% % title('ECMWF') +% % colormap(precip_cmap) +% +% subplot(3,1,2) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, merra_mean); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([0 750]) +% title('MERRA') +% colormap(precip_cmap) +% +% subplot(3,1,3) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, cfsr_mean); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([0 750]) +% title('CFSR') +% colormap(precip_cmap) +% +% +% x3 = figure +% set(x3, 'papertype', 'a4') +% set(x3,'paperposition',[0.5,0.5,20,28]) +% +% % subplot(2,1,1) +% % imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, ecmwf_mean - gpcc_mean); +% % axis xy +% % pbaspect([2 1 1]) +% % hold on +% % plot(long, lat, 'k'); +% % caxis([-5 5]) +% % title('ECMWF - GPCC') +% % colormap(rel_precip_cmap) +% +% subplot(2,1,1) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, merra_mean - gpcc_mean); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-250 250]) +% title('MERRA - GPCC') +% colormap(rel_precip_cmap) +% +% subplot(2,1,2) +% imagesc(cfsr_prec{1,8}, cfsr_prec{1,7}, cfsr_mean - gpcc_mean); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-250 250]) +% title('CFSR - GPCC') +% colormap(rel_precip_cmap) + + + +% x2 = figure +% set(x2, 'papertype', 'a4') +% set(x2,'paperposition',[0.5,0.5,20,28]) +% +% subplot(3,1,1) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, ecmwf_rel); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([0 1]) +% title('ECMWF - GPCC, rel') +% colormap(precip_cmap) +% +% subplot(3,1,2) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, merra_rel); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([0 1]) +% title('MERRA - GPCC, rel') +% colormap(precip_cmap) +% +% subplot(3,1,3) +% +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, cfsr_rel); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([0 1]) +% title('CFSR - GPCC, rel') +% colormap(precip_cmap) + + + + +% subplot(4,1,4) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, cfsr_mean{2}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('CFSR, MAM') +% colormap(precip_cmap) + + + +% x1 = figure +% set(x1, 'papertype', 'a4') +% set(x1,'paperposition',[0.5,0.5,18,26]) +% +% subplot(4,1,1) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, gpcc_mean{3}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('GPCC, JJA') +% colormap(precip_cmap) +% +% subplot(4,1,2) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, ecmwf_mean{3}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('ECMWF, JJA') +% colormap(precip_cmap) +% +% subplot(4,1,3) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, merra_mean{3}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% 
caxis([-1 12]) +% title('MERRA, JJA') +% colormap(precip_cmap) +% +% subplot(4,1,4) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, cfsr_mean{3}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('CFSR, JJA') +% colormap(precip_cmap) +% +% +% +% +% +% x2 = figure +% set(x2, 'papertype', 'a4') +% set(x2,'paperposition',[0.5,0.5,18,26]) +% +% subplot(4,1,1) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, gpcc_mean{4}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('GPCC, SON') +% colormap(precip_cmap) +% +% subplot(4,1,2) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, ecmwf_mean{4}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('ECMWF, SON') +% colormap(precip_cmap) +% +% subplot(4,1,3) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, merra_mean{4}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('MERRA, SON') +% colormap(precip_cmap) +% +% subplot(4,1,4) +% imagesc(cfsr_prec1{1,8}, cfsr_prec1{1,7}, cfsr_mean{4}); +% axis xy +% pbaspect([2 1 1]) +% hold on +% plot(long, lat, 'k'); +% caxis([-1 12]) +% title('CFSR, SON') +% colormap(precip_cmap) +% % clr = colorbar('southoutside') +% % set(get(clr, 'xlabel'), 'String', '[mm/day]') + diff --git a/crt_t2_ts.m b/crt_t2_ts.m new file mode 100644 index 0000000..63d12f8 --- /dev/null +++ b/crt_t2_ts.m @@ -0,0 +1,110 @@ +function [] = crt_t2_ts(cind, time, mn, mx, stp) + + + +% load /home/lorenz-c/Data/Precipitation/MERRA/MERRA_PREC.mat +% load /home/lorenz-c/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% load /home/lorenz-c/Data/Precipitation/CRU3/CRU3_PREC.mat +% load /home/lorenz-c/Data/Precipitation/DEL/DEL_PREC.mat +% load /home/lorenz-c/Data/Precipitation/CFSR/CFSR_PREC.mat +% load /home/lorenz-c/Data/Precipitation/GPCC/GPCC_PREC.mat +% +% +% merra_c_agg_prec = crt_ts(merra_prec, [4 5 9]); +% merra_c_agg_prec = crt_ts(merra_prec, [4 5 9]); +% merra_c_agg_prec = crt_ts(merra_prec, [4 5 9]); +% merra_c_agg_prec = crt_ts(merra_prec, [4 5 9]); +% merra_c_agg_prec = crt_ts(merra_prec, [4 5 9]); +% merra_c_agg_prec = crt_ts(merra_prec, [4 5 9]); + + + +load /home/lorenz-c/Data/Catchment_agg/merra_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/cfsr01_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/ecmwf_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/cru_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/del_cagg_t2_sel.mat + +r = find(cell2mat(merra_cagg_t2_sel(:,2)) == cind); + +yrs = time(1):1:time(2); + +for i = 1:length(yrs) + yr_str(i,:) = ['Jan ', num2str(yrs(i))]; +end + +ecmwf_sind = find(ecmwf_cagg_t2_sel{1,3}(:,1) == 1 & ... + ecmwf_cagg_t2_sel{1,3}(:,2) == time(1)); +ecmwf_eind = find(ecmwf_cagg_t2_sel{1,3}(:,1) == 12 & ... + ecmwf_cagg_t2_sel{1,3}(:,2) == time(2)); + +merra_sind = find(merra_cagg_t2_sel{1,3}(:,1) == 1 & ... + merra_cagg_t2_sel{1,3}(:,2) == time(1)); +merra_eind = find(merra_cagg_t2_sel{1,3}(:,1) == 12 & ... + merra_cagg_t2_sel{1,3}(:,2) == time(2)); + +cfsr_sind = find(cfsr_cagg_t2_sel{1,3}(:,1) == 1 & ... + cfsr_cagg_t2_sel{1,3}(:,2) == time(1)); +cfsr_eind = find(cfsr_cagg_t2_sel{1,3}(:,1) == 12 & ... + cfsr_cagg_t2_sel{1,3}(:,2) == time(2)); + +cru_sind = find(cru_cagg_t2_sel{1,3}(:,1) == 1 & ... + cru_cagg_t2_sel{1,3}(:,2) == time(1)); +cru_eind = find(cru_cagg_t2_sel{1,3}(:,1) == 12 & ... + cru_cagg_t2_sel{1,3}(:,2) == time(2)); + +del_sind = find(del_cagg_t2_sel{1,3}(:,1) == 1 & ... 
+ del_cagg_t2_sel{1,3}(:,2) == time(1)); +del_eind = find(del_cagg_t2_sel{1,3}(:,1) == 12 & ... + del_cagg_t2_sel{1,3}(:,2) == time(2)); + + + +ts_matrix(:,1) = cru_cagg_t2_sel{r,3}(cru_sind:cru_eind,3); +ts_matrix(:,2) = del_cagg_t2_sel{r,3}(del_sind:del_eind,3); +ts_matrix(:,3) = ecmwf_cagg_t2_sel{r,3}(ecmwf_sind:ecmwf_eind,3); +ts_matrix(:,4) = merra_cagg_t2_sel{r,3}(merra_sind:merra_eind,3); +ts_matrix(:,5) = cfsr_cagg_t2_sel{r,3}(cfsr_sind:cfsr_eind,3); + + + + +nr_steps = merra_eind - merra_sind; + +for i = 1:nr_steps+1; + stepnr(i) = i; +end + +figure + +plot(stepnr, ts_matrix(:,1), 'r', 'linewidth', 1) +hold on +plot(stepnr, ts_matrix(:,2), 'g', 'linewidth', 1) +plot(stepnr, ts_matrix(:,3), 'b', 'linewidth', 1) +plot(stepnr, ts_matrix(:,4), 'c', 'linewidth', 1) +plot(stepnr, ts_matrix(:,5), 'm', 'linewidth', 1) + +% legend('CRU', 'DEL', 'ECMWF', 'MERRA', 'CFSR','location','northoutside'); + +set(gca, 'xtick', 1:12:nr_steps); +set(gca, 'xticklabel', yr_str, 'xminortick', 'on', 'xminorgrid', 'on'); + +grid on + + +% axis([1 nr_steps mn mx]); +tlte = ['Averaged monthly temperature for ', merra_cagg_t2_sel{r,1}]; +title(tlte); +ylabel('[°C]') +pbaspect([3 1 1]) + +filenme = ['/home/lorenz-c/Dokumente/Projektarbeit/Analysis/Precipitation/Timeseries/T2_Ts_', ... + merra_cagg_t2_sel{r,1}] +print('-depsc', filenme); + + + + + + + diff --git a/crt_taylor.m b/crt_taylor.m new file mode 100644 index 0000000..9f0d0e9 --- /dev/null +++ b/crt_taylor.m @@ -0,0 +1,82 @@ +function [R_ja, R_ju, E_ja, E_ju, S_ja, S_ju] = crt_taylorARSCHFOTZE(R_ja, R_ju, E_ja, E_ju, S_ja, S_ju, i, inpt1, inpt2) + + +[glb_r, glb_e, glb_s] = comp_glob_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 1, [4 5 9], -9999); +R_ja{1}(:,i) = glb_r(1, 2:end)'; +R_ju{1}(:,i) = glb_r(7, 2:end)'; +E_ja{1}(:,i) = glb_e(1, 2:end)'; +E_ju{1}(:,i) = glb_e(7, 2:end)'; +S_ja{1}(:,i) = glb_s(1, 2:end)'; +S_ju{1}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_glob_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 4, [4 5 9], -9999); +R_ja{2}(:,i) = glb_r(1, 2:end)'; +R_ju{2}(:,i) = glb_r(7, 2:end)'; +E_ja{2}(:,i) = glb_e(1, 2:end)'; +E_ju{2}(:,i) = glb_e(7, 2:end)'; +S_ja{2}(:,i) = glb_s(1, 2:end)'; +S_ju{2}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_glob_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 5, [4 5 9], -9999); +R_ja{3}(:,i) = glb_r(1, 2:end)'; +R_ju{3}(:,i) = glb_r(7, 2:end)'; +E_ja{3}(:,i) = glb_e(1, 2:end)'; +E_ju{3}(:,i) = glb_e(7, 2:end)'; +S_ja{3}(:,i) = glb_s(1, 2:end)'; +S_ju{3}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_glob_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 6, [4 5 9], -9999); +R_ja{4}(:,i) = glb_r(1, 2:end)'; +R_ju{4}(:,i) = glb_r(7, 2:end)'; +E_ja{4}(:,i) = glb_e(1, 2:end)'; +E_ju{4}(:,i) = glb_e(7, 2:end)'; +S_ja{4}(:,i) = glb_s(1, 2:end)'; +S_ju{4}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 7, [4 5 9], -9999); +R_ja{5}(:,i) = glb_r(1, 2:end)'; +R_ju{5}(:,i) = glb_r(7, 2:end)'; +E_ja{5}(:,i) = glb_e(1, 2:end)'; +E_ju{5}(:,i) = glb_e(7, 2:end)'; +S_ja{5}(:,i) = glb_s(1, 2:end)'; +S_ju{5}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 6, [4 5 9], -9999); +R_ja{6}(:,i) = glb_r(1, 2:end)'; +R_ju{6}(:,i) = glb_r(7, 2:end)'; +E_ja{6}(:,i) = glb_e(1, 2:end)'; +E_ju{6}(:,i) = glb_e(7, 2:end)'; +S_ja{6}(:,i) = glb_s(1, 2:end)'; +S_ju{6}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 3, [4 5 9], -9999); 
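% The ten blocks in this function differ only in the region id and in whether
% comp_glob_corr or comp_cont_corr is called; rows 1 and 7 of the returned
% matrices appear to pick January and July (hence the _ja/_ju suffixes). The
% same fill could be written as a loop (a sketch only; the region lists are
% copied from the calls in this function, k simply numbers the output cells):
%
%   glb_ids  = [1 4 5 6];        % handled by comp_glob_corr
%   cont_ids = [7 6 3 8 9 1];    % handled by comp_cont_corr
%   for k = 1:numel(glb_ids) + numel(cont_ids)
%       if k <= numel(glb_ids)
%           [glb_r, glb_e, glb_s] = comp_glob_corr(inpt1, inpt2, [1989 2006], ...
%               'monthly_s', glb_ids(k), [4 5 9], -9999);
%       else
%           [glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], ...
%               'monthly_s', cont_ids(k - numel(glb_ids)), [4 5 9], -9999);
%       end
%       R_ja{k}(:,i) = glb_r(1, 2:end)';    R_ju{k}(:,i) = glb_r(7, 2:end)';
%       E_ja{k}(:,i) = glb_e(1, 2:end)';    E_ju{k}(:,i) = glb_e(7, 2:end)';
%       S_ja{k}(:,i) = glb_s(1, 2:end)';    S_ju{k}(:,i) = glb_s(7, 2:end)';
%   end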
+R_ja{7}(:,i) = glb_r(1, 2:end)'; +R_ju{7}(:,i) = glb_r(7, 2:end)'; +E_ja{7}(:,i) = glb_e(1, 2:end)'; +E_ju{7}(:,i) = glb_e(7, 2:end)'; +S_ja{7}(:,i) = glb_s(1, 2:end)'; +S_ju{7}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 8, [4 5 9], -9999); +R_ja{8}(:,i) = glb_r(1, 2:end)'; +R_ju{8}(:,i) = glb_r(7, 2:end)'; +E_ja{8}(:,i) = glb_e(1, 2:end)'; +E_ju{8}(:,i) = glb_e(7, 2:end)'; +S_ja{8}(:,i) = glb_s(1, 2:end)'; +S_ju{8}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 9, [4 5 9], -9999); +R_ja{9}(:,i) = glb_r(1, 2:end)'; +R_ju{9}(:,i) = glb_r(7, 2:end)'; +E_ja{9}(:,i) = glb_e(1, 2:end)'; +E_ju{9}(:,i) = glb_e(7, 2:end)'; +S_ja{9}(:,i) = glb_s(1, 2:end)'; +S_ju{9}(:,i) = glb_s(7, 2:end)'; + +[glb_r, glb_e, glb_s] = comp_cont_corr(inpt1, inpt2, [1989 2006], 'monthly_s', 1, [4 5 9], -9999); +R_ja{10}(:,i) = glb_r(1, 2:end)'; +R_ju{10}(:,i) = glb_r(7, 2:end)'; +E_ja{10}(:,i) = glb_e(1, 2:end)'; +E_ju{10}(:,i) = glb_e(7, 2:end)'; +S_ja{10}(:,i) = glb_s(1, 2:end)'; +S_ju{10}(:,i) = glb_s(7, 2:end)'; \ No newline at end of file diff --git a/crt_tqv_ts.m b/crt_tqv_ts.m new file mode 100644 index 0000000..e31b847 --- /dev/null +++ b/crt_tqv_ts.m @@ -0,0 +1,30 @@ +% function [] = crt_tqv_ts + +% load /media/storage/Data/Total_water_atm/SSMI/SSMI_TQV.mat +% ssmi = comp_glob_quant(ssmi_tqv, [1989 2006], 1, 'complete', [4 5 9], 'NaN', 0); +% clear ssmi_tqv + +load /media/storage/Data/Total_water_atm/CFSR/CFSR_TQV.mat +cfsr = comp_glob_quant(cfsr_tqv, [1989 2006], 0, 'complete', [4 5 9], 'NaN', 0); +clear cfsr_tqv + +load /media/storage/Data/Total_water_atm/ECMWF/ECMWF_TQV.mat +ecmwf = comp_glob_quant(ecmwf_tqv, [1989 2006], 0, 'complete', [4 5 9], 'NaN', 0); +clear ecmwf_tqv + + +load /media/storage/Data/Total_water_atm/MERRA/MERRA_TQV.mat +merra = comp_glob_quant(merra_tqv, [1989 2006], 0, 'complete', [4 5 9], 'NaN', 0); +clear merra_tqv + + +fu = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + +% plot(ssmi(:,3), 'm', 'linewidth', 1.5) + +plot(ecmwf(:,3), 'b', 'linewidth', 1.5); +hold on +plot(merra(:,3), 'r', 'linewidth', 1.5) +plot(cfsr(:,3), 'g', 'linewidth', 1.5); + +grid on diff --git a/crt_ts.m b/crt_ts.m new file mode 100644 index 0000000..7de5bc3 --- /dev/null +++ b/crt_ts.m @@ -0,0 +1,63 @@ +function otpt = spataggmn(inpt, clms, miss, id_map, area_id) +% The function computes time-series of area-weighted means over selected +% areas. These areas are defined in the id_map (a matrix where connected +% regions have the same id). The user can choose multiple areas according +% to their area_id. 
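% Usage sketch (the input variables below are hypothetical; the column indices
% [4 5 9] = [month year field] follow the convention used throughout these
% scripts, and id_map is a 360 x 720 matrix of region ids on the 0.5 deg grid).
% Note that MATLAB will call this function by its file name crt_ts, even
% though it is declared as spataggmn.
%
%   basin_ids = [1 7 12];                                      % regions of interest
%   ts = spataggmn(prec_cell, [4 5 9], -9999, id_map, basin_ids);
%   % ts(1, 4:end)      -> the selected region ids
%   % ts(2:end, 1:3)    -> month, year and datenum of every time step
%   % ts(2:end, 4:end)  -> area-weighted mean over each region and time step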
+%-------------------------------------------------------------------------- +% Input: n [1 x 1] angular side length of a pixel [deg] +% (default: n = 0.5°) +% theta [n x 1] co-latitude of the pixel center [deg] +% +% Output: A [n x 1] area of the pixels on the surface +% of the Earth [m^2] +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: January 2008 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + + +if size(area_id,1) > 1 + area_id = area_id'; +end + +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); +flds = inpt(:, clms(3)); +clear inpt + +theta = 0.25:0.5:179.75; +A_mer = area_wghts(theta, 'mat'); + +nr_tmestps = length(mnths); +nr_catch = length(area_id); + +dte = datenum(yrs, mnths, ones(nr_tmestps,1)*15); + +otpt = zeros(nr_tmestps + 1, nr_catch + 3); +otpt(1, 4:end) = area_id; +otpt(2:end, 1) = mnths; +otpt(2:end, 2) = yrs; +otpt(2:end, 3) = dte; + +mask = zeros(360, 720); +tmp = zeros(nr_tmestps,1); + +h = waitbar(0,'','Name','...% of catchments computed'); +for i = 1:nr_catch + mask = ismember(id_map, area_id(i)); + tmp = cellfun(@(x) comp_c_sig(x, miss, mask, A_mer), flds); + otpt(2:end, i+3) = tmp; + waitbar(i/nr_catch, h, [int2str((i*100)/nr_catch) '%']) +end +close(h) + +function catch_sig = comp_c_sig(fld, miss, mask, A_mer) + mask(fld == miss) = 0; + tmp = mask.*A_mer; + A_ctch = tmp/sum(sum(tmp)); + catch_sig = sum(sum(fld.*A_ctch)); + + + diff --git a/crt_tspat_corr.m b/crt_tspat_corr.m new file mode 100644 index 0000000..5242718 --- /dev/null +++ b/crt_tspat_corr.m @@ -0,0 +1,34 @@ +% +% +% load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +% gpcc = comp_spat_mean(gpcc_prec, [1989 2006], 'seasonal_2',[4 5 9], -9999); +% clear *prec +% +% load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +% cru = comp_spat_mean(cru_prec, [1989 2006], 'seasonal_2',[4 5 9], -9999); +% clear *prec +% +% load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +% cpc = comp_spat_mean(cpc_prec, [1989 2006], 'seasonal_2',[4 5 9], -9999); +% clear *prec +% +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% ecmwf = comp_spat_mean(ecmwf_prec, [1989 2006], 'seasonal_2',[4 5 9], -9999); +% clear *prec +% +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +% merra = comp_spat_mean(merra_prec, [1989 2006], 'seasonal_2',[4 5 9], -9999); +% clear *prec +% +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +% cfsr = comp_spat_mean(cfsr_prec, [1989 2006], 'seasonal_2',[4 5 9], -9999); +% clear *prec + +for i = 1:4 + R_cru{i} = comp_tspat_corr(gpcc(:,i), cru(:,i), -9999); + R_cpc{i} = comp_tspat_corr(gpcc(:,i), cpc(:,i), -9999); + R_ecmwf{i} = comp_tspat_corr(gpcc(:,i), ecmwf(:,i), -9999); + R_merra{i} = comp_tspat_corr(gpcc(:,i), merra(:,i), -9999); + R_cfsr{i} = comp_tspat_corr(gpcc(:,i), cfsr(:,i), -9999); +end + diff --git a/crt_zon_conts.m b/crt_zon_conts.m new file mode 100644 index 0000000..1b2efba --- /dev/null +++ b/crt_zon_conts.m @@ -0,0 +1,89 @@ +function [] = crt_zon_conts(set1, set2, set3, set4) +% +% load /media/storage/Data/Precipitation/GPCP/GPCP_PRECv2.1.mat +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC01.mat +load /home/lorenz-c/Data/colormaps/precip_zonal.mat +% cf_zonal_glob = 
comp_zon_prec(cfsr_prec1, [4 5 9], [1979 2009], 0, 'mnth'); +% ec_zonal_glob = comp_zon_prec(ecmwf_prec, [4 5 9], [1989 2009], 0, 'mnth'); +% mr_zonal_glob = comp_zon_prec(merra_prec, [4 5 9], [1979 2009], 0, 'mnth'); +% gp_zonal_glob = comp_zon_prec(gpcp_prec, [4 5 9], [1979 2009], 0, 'mnth'); + +v = 0:1:10; +t = 0.25:0.5:179.75; +x = 1:1:12; + +tck{1} = '90S'; +tck{2} = '60S'; +tck{3} = '30S'; +tck{4} = '0'; +tck{5} = '30N'; +tck{6} = '60N'; +tck{7} = '90N'; + +xtck{1} = 'J'; +xtck{2} = 'F'; +xtck{3} = 'M'; +xtck{4} = 'A'; +xtck{5} = 'M'; +xtck{6} = 'J'; +xtck{7} = 'J'; +xtck{8} = 'A'; +xtck{9} = 'S'; +xtck{10} = 'O'; +xtck{11} = 'N'; +xtck{12} = 'D'; + +c = figure; +set(c,'paperunits','centimeters') +set(c, 'papertype', 'a4'); +set(c,'paperposition',[0.5,0.5,24,20]) +set(c, 'paperorientation', 'landscape') + +s1 = subplot(2,2,1, 'align') +contourf(x,t,flipud(set1), v, 'linestyle', 'none'); +set(s1, 'ytick', 0:30:180); +set(s1, 'yticklabel', tck); +set(s1, 'xtick', 1:12); +set(s1, 'xticklabel', xtck); +colormap(precip_zonal) +title('GPCP') +axis([1 12 0 180]) +pbaspect([2 1.5 1]) + +s2 = subplot(2,2,2, 'align') +contourf(x,t,flipud(set2), v, 'linestyle', 'none'); +set(s2, 'ytick', 0:30:180); +set(s2, 'yticklabel', tck); +set(s2, 'xtick', 1:12); +set(s2, 'xticklabel', xtck); +colormap(precip_zonal) +title('ECMWF') +axis([1 12 0 180]) +pbaspect([2 1.5 1]) + + +s3 = subplot(2,2,3, 'align') +contourf(x,t,flipud(set3), v, 'linestyle', 'none'); +set(s3, 'ytick', 0:30:180); +set(s3, 'yticklabel', tck); +set(s3, 'xtick', 1:12); +set(s3, 'xticklabel', xtck); +colormap(precip_zonal) +title('MERRA') +axis([1 12 0 180]) +pbaspect([2 1.5 1]) + + +s4 = subplot(2,2,4, 'align') +contourf(x,t,flipud(set4), v, 'linestyle', 'none'); +set(s4, 'ytick', 0:30:180); +set(s4, 'yticklabel', tck); +set(s4, 'xtick', 1:12); +set(s4, 'xticklabel', xtck); +colormap(precip_zonal) +title('CFSR') +axis([1 12 0 180]) +pbaspect([2 1.5 1]) + diff --git a/crt_zonal_plts.m b/crt_zonal_plts.m new file mode 100644 index 0000000..7a14318 --- /dev/null +++ b/crt_zonal_plts.m @@ -0,0 +1,265 @@ +% function [] = crt_zonal_plts +load /home/lorenz-c/Data/colormaps/precip_zonal.mat + +ytck{1} = '90S'; +ytck{2} = '70S'; +ytck{3} = '50S'; +ytck{4} = '30S'; +ytck{5} = '10S'; +ytck{6} = '10N'; +ytck{7} = '30N'; +ytck{8} = '50N'; +ytck{8} = '70N'; +ytck{8} = '50N'; +ytck{9} = '70N'; +ytck{10} = '90N'; + + +xtck{1}='J'; +xtck{2}='F'; +xtck{3}='M'; +xtck{4}='A'; +xtck{5}='M'; +xtck{6}='J'; +xtck{7}='J'; +xtck{8}='A'; +xtck{9}='S'; +xtck{10}='O'; +xtck{11}='N'; +xtck{12}='D'; + + + + + +load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +gpcc_zon_ann = comp_zon_quant(gpcc_prec, [1989 2006], 3, 'annual'); +gpcc_zon_snl = comp_zon_quant(gpcc_prec, [1990 2006], 3, 'seasonal'); +gpcc_zon_mnt = comp_zon_quant(gpcc_prec, [1989 2006], 3, 'monthly'); +clear gpcc_prec + +gpcc{1} = gpcc_zon_ann; +gpcc{2} = gpcc_zon_snl; +gpcc{3} = gpcc_zon_mnt; +gpcc{3} = [gpcc{3}(:,12) gpcc{3}(:, 1:12) gpcc{3}(:,1)]; +clear gpcc_zon* + + +c = figure +contourf(flipud(gpcc{1,3}), 0:1:9, 'linestyle', 'none') +caxis([0 9]) +set(gca, 'xtick', 1.5:1:13.5) +set(gca, 'xticklabel', xtck, 'fontsize', 14) +set(gca, 'ytick', 0:40:360) +set(gca, 'yticklabel', ytck, 'fontsize', 14) +axis([1 14 80 320]) +colormap(precip_zonal) +pbaspect([14 6 1]) +grid on +print -depsc gpcc_zonal.eps +close(c) +sprintf('GPCC done') + +load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +cpc_zon_ann = comp_zon_quant(cpc_prec, [1989 2006], 3, 'annual'); +cpc_zon_snl = 
comp_zon_quant(cpc_prec, [1990 2006], 3, 'seasonal'); +cpc_zon_mnt = comp_zon_quant(cpc_prec, [1989 2006], 3, 'monthly'); +clear cpc_prec + +cpc{1} = cpc_zon_ann; +cpc{2} = cpc_zon_snl; +cpc{3} = cpc_zon_mnt; +cpc{3} = [cpc{3}(:,12) cpc{3}(:, 1:12) cpc{3}(:,1)]; + +clear cpc_zon* +c = figure +contourf(flipud(cpc{1,3}), 0:1:9, 'linestyle', 'none') +caxis([0 9]) +set(gca, 'xtick', 1.5:1:13.5) +set(gca, 'xticklabel', xtck, 'fontsize', 14) +set(gca, 'ytick', 0:40:360) +set(gca, 'yticklabel', ytck, 'fontsize', 14) +axis([1 14 80 320]) +colormap(precip_zonal) +pbaspect([14 6 1]) +grid on +print -depsc2 cpc_zonal.eps +close(c) +sprintf('CPC done') + +load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +cru_zon_ann = comp_zon_quant(cru_prec, [1989 2006], 3, 'annual'); +cru_zon_snl = comp_zon_quant(cru_prec, [1990 2006], 3, 'seasonal'); +cru_zon_mnt = comp_zon_quant(cru_prec, [1989 2006], 3, 'monthly'); +clear cru_prec + +cru{1} = cru_zon_ann; +cru{2} = cru_zon_snl; +cru{3} = cru_zon_mnt; +cru{3} = [cru{3}(:,12) cru{3}(:, 1:12) cru{3}(:,1)]; + +clear cru_zon* +c = figure +contourf(flipud(cru{1,3}), 0:1:9, 'linestyle', 'none') +caxis([0 9]) +set(gca, 'xtick', 1.5:1:13.5) +set(gca, 'xticklabel', xtck, 'fontsize', 14) +set(gca, 'ytick', 0:40:360) +set(gca, 'yticklabel', ytck, 'fontsize', 14) +axis([1 14 80 320]) +colormap(precip_zonal) +pbaspect([14 6 1]) +grid on +print -depsc2 cru_zonal.eps +close(c) +sprintf('CRU done') + +load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +cfsr_zon_ann = comp_zon_quant(cfsr_prec, [1989 2006], 3, 'annual'); +cfsr_zon_snl = comp_zon_quant(cfsr_prec, [1990 2006], 3, 'seasonal'); +cfsr_zon_mnt = comp_zon_quant(cfsr_prec, [1989 2006], 3, 'monthly'); +clear cfsr_prec + +cfsr{1} = cfsr_zon_ann; +cfsr{2} = cfsr_zon_snl; +cfsr{3} = cfsr_zon_mnt; +cfsr{3} = [cfsr{3}(:,12) cfsr{3}(:, 1:12) cfsr{3}(:,1)]; +clear cfsr_zon* + +c = figure +contourf(flipud(cfsr{1,3}), 0:1:9, 'linestyle', 'none') +caxis([0 9]) +set(gca, 'xtick', 1.5:1:13.5) +set(gca, 'xticklabel', xtck, 'fontsize', 14) +set(gca, 'ytick', 0:40:360) +set(gca, 'yticklabel', ytck, 'fontsize', 14) +axis([1 14 80 320]) +colormap(precip_zonal) +pbaspect([14 6 1]) +grid on +print -depsc2 cfsr_zonal.eps +close(c) +sprintf('CFSR done') + +load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +merra_zon_ann = comp_zon_quant(merra_prec, [1989 2006], 3, 'annual'); +merra_zon_snl = comp_zon_quant(merra_prec, [1990 2006], 3, 'seasonal'); +merra_zon_mnt = comp_zon_quant(merra_prec, [1989 2006], 3, 'monthly'); +clear merra_prec + +merra{1} = merra_zon_ann; +merra{2} = merra_zon_snl; +merra{3} = merra_zon_mnt; +merra{3} = [merra{3}(:,12) merra{3}(:, 1:12) merra{3}(:,1)]; +clear merra_zon* +c = figure +contourf(flipud(merra{1,3}), 0:1:9, 'linestyle', 'none') +caxis([0 9]) +set(gca, 'xtick', 1.5:1:13.5) +set(gca, 'xticklabel', xtck, 'fontsize', 14) +set(gca, 'ytick', 0:40:360) +set(gca, 'yticklabel', ytck, 'fontsize', 14) +axis([1 14 80 320]) +colormap(precip_zonal) +pbaspect([14 6 1]) +grid on +f = colorbar('southoutside') +set(get(f, 'xlabel'), 'string', '[mm/day]') +set(f, 'fontsize', 14) + +print -depsc2 merra_zonal.eps +close(c) +sprintf('MERRA done') + +load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +ecmwf_zon_ann = comp_zon_quant(ecmwf_prec, [1989 2006], 3, 'annual'); +ecmwf_zon_snl = comp_zon_quant(ecmwf_prec, [1990 2006], 3, 'seasonal'); +ecmwf_zon_mnt = comp_zon_quant(ecmwf_prec, [1989 2006], 3, 'monthly'); +clear ecmwf_prec + +ecmwf{1} = ecmwf_zon_ann; +ecmwf{2} = ecmwf_zon_snl; 
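% Every data set in this script repeats the same comp_zon_quant / contourf /
% print block; prepending December and appending January ([x(:,12) x x(:,1)])
% simply lets the monthly contour plot wrap smoothly across the turn of the
% year. The shared part could be factored into a small helper (a sketch only;
% the name plot_zonal_mnt is hypothetical):
%
%   function plot_zonal_mnt(zon_mnt, epsname, cmap, xtck, ytck)
%   % zon_mnt: 360 x 12 matrix of zonal monthly means (rows = 0.5 deg latitude bands)
%   zon_wrap = [zon_mnt(:,12) zon_mnt zon_mnt(:,1)];    % wrap Dec ... Jan
%   c = figure;
%   contourf(flipud(zon_wrap), 0:1:9, 'linestyle', 'none');
%   caxis([0 9]);
%   set(gca, 'xtick', 1.5:1:13.5, 'xticklabel', xtck, 'fontsize', 14);
%   set(gca, 'ytick', 0:40:360,  'yticklabel', ytck, 'fontsize', 14);
%   axis([1 14 80 320]);
%   colormap(cmap);  pbaspect([14 6 1]);  grid on
%   print('-depsc2', epsname);
%   close(c);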
+ecmwf{3} = ecmwf_zon_mnt; +ecmwf{3} = [ecmwf{3}(:,12) ecmwf{3}(:, 1:12) ecmwf{3}(:,1)]; +clear ecmwf_zon* +c = figure +contourf(flipud(ecmwf{1,3}), 0:1:9, 'linestyle', 'none') +caxis([0 9]) +set(gca, 'xtick', 1.5:1:13.5) +set(gca, 'xticklabel', xtck, 'fontsize', 14) +set(gca, 'ytick', 0:40:360) +set(gca, 'yticklabel', ytck, 'fontsize', 14) +axis([1 14 80 320]) +colormap(precip_zonal) +pbaspect([14 6 1]) +grid on +print -depsc2 ecmwf_zonal.eps +close(c) +sprintf('ECMWF done') + + + + +% c = figure +% +% set(c,'paperunits','centimeters') +% set(c, 'papertype', 'a4'); +% set(c,'paperposition',[0.5,2,20,25]) + +% indx = 1:360; +% +tck{1} = '90N'; +tck{2} = '45N'; +tck{3} = '0N'; +tck{4} = '45S'; +tck{5} = '90S'; + + +% ttle{1} = 'January'; +% ttle{2} = 'February'; +% ttle{3} = 'March'; +% ttle{4} = 'April'; +% ttle{5} = 'May'; +% ttle{6} = 'June'; +% ttle{7} = 'July'; +% ttle{8} = 'August'; +% ttle{9} = 'September'; +% ttle{10} = 'October'; +% ttle{11} = 'November'; +% ttle{12} = 'December'; + +% figure +% plot(indx, gpcc_zon_ann, 'k'); +% hold on +% plot(indx, cpc_zon_ann, '--k'); +% plot(indx, ecmwf_zon_ann, 'Color', [0.5 0.5 0.5]); +% plot(indx, merra_zon_ann, '--', 'color', [0.5 0.5 0.5]); +% plot(indx, cfsr_zon_ann, '-.', 'color', [0.5 0.5 0.5]); +% +% set(gca, 'xtick', 0:90:360) +% set(gca, 'xticklabel', tck) + + +% end +% +% if i == 10 | i == 11 | i == 12 +% xlabel('Latitude') +% set(gca, 'xticklabel', tck) +% else +% set(gca, 'xticklabel', ' ') +% end +% +% % if i == 3 +% % legend('GPCC', 'CRU', 'DEL', 'CPC', 'ECMWF', 'MERRA', 'CFSR') +% % end +% +% axis([0 360 0 2]) +% grid on +% hold off +% end +% +% print('-depsc', '-r300', 'zonal_mnth_prec') + + + + \ No newline at end of file diff --git a/cs2sc.m b/cs2sc.m new file mode 100755 index 0000000..e152543 --- /dev/null +++ b/cs2sc.m @@ -0,0 +1,40 @@ +function sc = cs2sc(field,backval) + +% CS2SC(FIELD,backval) converts the square (L+1)x(L+1) matrix FIELD, containing +% spherical harmonics coefficients in |C\S| storage format into a +% rectangular (L+1)x(2L+1) matrix in /S|C\format. +% The argument backval is optional and describes the matrix entries, +% where m > l. Default is 1e-20! 
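% Usage sketch for lmax = 2, with symbolic entries only, to show the two
% storage schemes side by side (backval fills the empty corners):
%
%   cs = [C00 S11 S21;                 % |C\S|: Clm on and below the diagonal,
%         C10 C11 S22;                 %        Slm mirrored above it
%         C20 C21 C22];
%   sc = cs2sc(cs);                    % /S|C\: (lmax+1) x (2*lmax+1)
%   % sc = [ .    .  C00   .    .
%   %        .   S11 C10  C11   .
%   %       S22  S21 C20  C21  C22]    ('.' = backval, default 1e-20)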
+ +%---------------------------------------------------------------------------- +% Nico Sneeuw, IAPG, TU-Munich 22/07/94 +%---------------------------------------------------------------------------- +% uses none +% +%---------------------------------------------------------------------------- +% revision history +% 07/99, Matthias Weigelt - V5 adaptation, eliminiation of TRAPSTRIP +%---------------------------------------------------------------------------- +% remarks +% +%---------------------------------------------------------------------------- + + + +if nargin == 1, backval = 1e-20; end +[rows,cols] = size(field); +lmax = rows -1; +if cols == (2*lmax +1) + sc = field; + display('Field was already in SC format') +elseif cols ~= rows, + error('I expect a square matrix.'), +elseif cols == rows + c = tril(field); + s = rot90(triu(field,1),-1); + + mask = backval*ones(lmax+1,2*lmax+1); + a = fliplr(triu(mask(:,1:cols-1))); + b = triu(mask(:,cols:2*lmax+1),1); + sc = [a b] + [s(:,2:lmax+1) c]; +end diff --git a/ctch_corr.m b/ctch_corr.m new file mode 100644 index 0000000..ab584c1 --- /dev/null +++ b/ctch_corr.m @@ -0,0 +1,7 @@ +function otpt = ctch_corr(corr, catch_indx, indexfle) + +otpt = zeros(360,720); + +for i = 1:length(catch_indx) + otpt(indexfle == catch_indx(i)) = corr(i); +end diff --git a/cubft2mm.m b/cubft2mm.m new file mode 100644 index 0000000..d1b8cc2 --- /dev/null +++ b/cubft2mm.m @@ -0,0 +1,25 @@ +function R = cubft2mm(inpt, area) + +% 1 foot = 0.000189393939 miles +% +% +% area = area +% +% A = inpt*2446.58; % -> Cubic meters per day +% B = A./area; % -> m/day + +% Convert drainage are to square feet +A = area*27878400; + +% Cubic feet per second -> cubic feet per day +B = 86400; + +% Combine A and B +C = B/A; +C = C*12*25.4; + +R = inpt.*C; + + + + diff --git a/daysinmonth.m b/daysinmonth.m new file mode 100644 index 0000000..1a9433f --- /dev/null +++ b/daysinmonth.m @@ -0,0 +1,25 @@ +function nr_days = daysinmonth(month, year) + + +l_year = [1980; 1984; 1988; 1992; 1996; 2000; 2004; 2008; 2012]; + +if find(year == l_year) + n_feb = 29; +else + n_feb = 28; +end + +mnth31 = [1 3 5 7 8 10 12]; +mnth30 = [4 6 9 11]; + +if find(month == mnth31) + nr_days = 31; +elseif find(month == mnth30) + nr_days = 30; +elseif month == 2 & find(year == l_year) + nr_days = 29; +else + nr_days = 28; +end + + diff --git a/degvar.m b/degvar.m new file mode 100644 index 0000000..c325aef --- /dev/null +++ b/degvar.m @@ -0,0 +1,90 @@ +function dv = degvar(spctrm,maxdeg,cum,fstr,h,cap,type) + +% DEGVAR(SPCTRM,MAXDEG) computes the root-mean-squared error of the degree- +% variances. +% +% dv = degvar(spctrm,maxdeg) +% dv = degvar(spctrm,maxdeg,cum,fstr,h,cap,type) +% +% INPUT +% spctrm - Co-efficients of the spectral field given in sc- or +% cs-formats or in [l m Clm Slm] format. +% maxdeg - Maximum degree of the co-efficients. +% cum - Cumulative degree variances are computed if cum=1, else +% degree variances are alone calculated. +% fstr - String that defines the quantity to be calculated. +% 'none' - dimensionless, 'geoid' [m], 'smd' [kg/m^2], +% 'water' [m], 'potential' [m^2/s^2], 'dg' or 'gravity' +% (gravity anomalies), 'tr' (gravity disturbances),'trr' +% (d^2/dr^2), 'slope' (slope of the surface gradient). +% def - ['none'] +% h - height of the point of evaluation [m] +% cap - Smoothing radius Pellinen <= 90[deg], Gaussian >= 100[km]. +% type - 'champ', 'grace', 'normal' +% +% OUTPUT +% dv - Degree variances (power spectrum) of the spectral field. 
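% In terms of the coefficients, the (unsmoothed, dimensionless) degree
% variance returned per degree l is
%
%     dv_l = sum_{m=0..l} ( Clm^2 + Slm^2 ),
%
% i.e. in CS storage the row sums of the squared lower triangle (the Clm)
% plus the column sums of the squared upper triangle (the Slm). A two-line
% check of that identity (cs is any (lmax+1) x (lmax+1) CS matrix):
%
%   C  = tril(cs);   S = cs - C;
%   dv = sum(C.'.^2, 1).' + sum(S.^2, 1).';   % one value per degree 0..lmax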
+%-------------------------------------------------------------------------- + +% Created on 29 May 2007 +% Modified on 9 January 2009 +% added additional inputs for applying isotf function +% removed for loops +% Author: Balaji Devaraju, Stuttgart +%-------------------------------------------------------------------------- + +% dv = [(0:maxdeg)',zeros(maxdeg+1,1)]; +[rows, cols] = size(spctrm); + +if cols == 4 || ((2*rows - 1) == cols) || (rows == cols) + + if nargin == 2 + cum = 0; + fstr = 'none'; + h = 0; + cap = 0; + type = 'none'; + elseif nargin == 3 + fstr = 'none'; + h = 0; + cap = 0; + type = 'none'; + elseif nargin == 4 + h = 0; + cap = 0; + type = 'none'; + elseif nargin == 5 + cap = 0; + type = 'none'; + elseif nargin == 6 + type = 'none'; + end + + cum = logical(cum); + + + if cols == 4 + if length(spctrm) == sum(1:maxdeg+1) + spctrm = sc2cs(gcoef2sc(cssc2clm(spctrm,120))); + else + error('Incomplete set of co-efficients') + end + elseif (2*rows-1 == cols) + spctrm = sc2cs(spctrm); + end + + if size(spctrm,1) == size(spctrm,2) + dv = tril(spctrm); + spctrm = spctrm - dv; + dv = (sum(dv'.^2) + sum(spctrm.^2))'; + if nargin > 3 + dv = dv.*(isotf((0:maxdeg)',fstr,h,cap,type)).^2; + end + if cum + dv = cumsum(dv); + end + dv = [(0:maxdeg)' dv]; + end +else + error('The format is not compatible with the function') +end diff --git a/doiceana.m b/doiceana.m new file mode 100644 index 0000000..acf304b --- /dev/null +++ b/doiceana.m @@ -0,0 +1,110 @@ +function [B, Se, recon_ic, recon_F] = doiceana(inpt, mxmde, mask, theta) + + +if nargin < 4, theta = 89.75:-0.5:-89.75; end +if nargin < 3, mask = ones(size(inpt{1})); end +if nargin < 2, mxmde = length(inpt); end + + +doeof = input('Perform PCA? Y/N [Y]: ', 's'); +if isempty(doeof) + doeof = 'Y'; +end +[rws, cls] = size(inpt{1}); + + +if strcmp(doeof, 'Y') + fprintf('Performing PCA.....') + [eofs, pcs, lams, recon_G] = eof_ana(inpt, 'mask', mask, ... + 'mxmde', mxmde, ... + 'areawght', false, ... + 'dorecon', false); + fprintf(' Done! \n') + showeigs = input('Show eigenvalues? Y/N [Y]: ', 's'); + + if isempty(showeigs) + doeof = 'Y'; + end + + if strcmp(showeigs, 'Y') + plot(1:mxmde, lams(:,2)); + xlabel('Mode'); + ylabel('%'); + fprintf('%g \n', lams(:,3)) + end + + changemd = input('Change number of modes? Y/N [N]: ', 's'); + if isempty(changemd) + changemd = 'N'; + end + + if strcmp(changemd, 'Y') + mxmde = input('Enter number of modes: '); + end +end + +[F, c_indx] = cell2catchmat(inpt, mask, theta'); + +decomp = input('1 for temporal decomposition; 2 for spatial decomposition: '); + +if decomp == 1 + + fprintf('Performing tempora decomposition... ') + B = jadeR(F, mxmde, true); + fprintf('Done! \n') + fprintf('Computing the source signals... ') + Se = B * F; + fprintf('Done! \n') + + maprecon = input('Reconstruct maps? Y/N [N]: ', 's'); + if isempty(maprecon) + maprecon = 'Y'; + end + + if strcmp(maprecon, 'Y') + A = pinv(B); + recon_ic = catchmat2cell(Se, c_indx, rws, cls); + recon_F = catchmat2cell(A*Se, c_indx, rws, cls); + else + recon_ic = 0; + recon_F = 0; + end + + +elseif decomp == 2 + + fprintf('Performing spatial decomposition... ') + B = jadeR(F', mxmde, true); + fprintf('Done! \n') + fprintf('Computing the source signals... ') + Se = B * F'; + fprintf('Done! \n') + + maprecon = input('Reconstruct maps? 
Y/N [N]: ', 's'); + + if isempty(maprecon) + maprecon = 'Y'; + end + + if strcmp(maprecon, 'Y') + A = pinv(B); + recon_ic = catchmat2cell(B, c_indx, rws, cls); + recon_F = catchmat2cell((A*Se)', c_indx, rws, cls); + else + recon_ic = 0; + recon_F = 0; + end + +end + + + + + + + + + + + + diff --git a/doy2date.m b/doy2date.m new file mode 100644 index 0000000..d97bebd --- /dev/null +++ b/doy2date.m @@ -0,0 +1,27 @@ +function date = doy2date(doy, year); + + +l_year = [1980 1984 1988 1992 1996 2000 2004 2008 2012]; + +if find(l_year == year) + n_feb = 29; +else + n_feb = 28; +end + +n_days = [31 n_feb 31 30 31 30 31 31 30 31 30 31]; + +days_year = 0; + +for i = 1:12 + days_month = 0; + for j = 1:n_days(i) + days_year = days_year + 1; + if doy == days_year + date = [doy j, i, year]; + break; + end + end +end + + diff --git a/dtevec.m b/dtevec.m new file mode 100644 index 0000000..850873d --- /dev/null +++ b/dtevec.m @@ -0,0 +1,91 @@ +function dte = dtevec(sdte, edte, tscale) + +if nargin < 3, tscale = 'monthly'; end + +if length(sdte) == 3 + dte = zeros(1,4); + sday = sdte(1); + smnth = sdte(2); + syr = sdte(3); +elseif length(sdte) == 2 + dte = zeros(1,2); + smnth = sdte(1); + syr = sdte(2); + + if strcmp(tscale, 'monthly') + sday = 15; + elseif strcmp(tscale, 'daily') + sday = 1; + end + + +end + + +if length(edte) == 3 + eday = edte(1); + emnth = edte(2); + eyr = edte(3); +elseif length(edte) == 2 + emnth = edte(1); + eyr = edte(2); + if strcmp(tscale, 'monthly') + eday = 15; + elseif strcmp(tscale, 'daily') + eday = eomday(eyr, emnth); + end + +end + +k = 1; + +snum = datenum(syr, smnth, sday); +yr = syr; +mnth = smnth; +day = sday; + +enum = datenum(eyr, emnth, eday); + +actnum = snum; + +if strcmp(tscale, 'monthly') + while actnum <= enum + dte(k, 1) = mnth; + dte(k, 2) = yr; + dte(k, 3) = actnum; + + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + + actnum = datenum(yr, mnth, 15); + k = k + 1; + end +elseif strcmp(tscale, 'daily') + while actnum <= enum + dte(k, 1) = day; + dte(k, 2) = mnth; + dte(k, 3) = yr; + dte(k, 4) = actnum; + + day = day + 1; + + if day > eomday(yr, mnth) + day = 1; + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + end + actnum = datenum(yr, mnth, day); + k = k + 1; + end +end + + + diff --git a/ecdfbiv.m b/ecdfbiv.m new file mode 100644 index 0000000..3fbd833 --- /dev/null +++ b/ecdfbiv.m @@ -0,0 +1,30 @@ +%% Programmed by Taesam Lee +% Dec 2009, INRS-ETE, Quebec +function [biv_CDF,x_c,y_c,biv_PDF]=ecdfbiv(XD,nxbin,nybin) +%% Estimating Empirical Bivariate CDF +% XD =nL*2 matrix where nL is the record length +% nxbin = bin_number for x (default=10) +% nybin = bin_number for y (default=10) +% example +% XD=mvnrnd([0,0],[1,0.7;0.7,1],1000); +% [biv_CDF,x_c,y_c]=ecdfbiv(XD,20,20) +% surfc(x_c,y_c,biv_CDF) +nL=length(XD); +if(nargin==2) + [N,C]=hist3(XD); +else + [N,C]=hist3(XD,[nxbin,nybin]); +end +bin_dist(1)=(C{1}(2)-C{1}(1))/2;bin_dist(2)=(C{2}(2)-C{2}(1))/2; +biv_PDF=N/nL/(bin_dist(1)*2*bin_dist(2)*2); + +x_c=C{1}+bin_dist(1); +y_c=C{2}+bin_dist(2); +sum1=0; +for i1=1:length(C{1}) + for i2=1:length(C{2}) + s1=sum(sum(N(1:i1,1:i2))); + biv_CDF(i1,i2)=s1/nL; + end +end +% surfc(x_c,y_c,biv_CDF) \ No newline at end of file diff --git a/ecopula.m b/ecopula.m new file mode 100644 index 0000000..6130ef2 --- /dev/null +++ b/ecopula.m @@ -0,0 +1,24 @@ +function ecop = ecopula(x) +%ECOPULA Empirical copula based on sample X. +% ECOP = ECOPULA(X) returns bivariate empirical copula. 
Extension to +% n dimensional empirical copula is straightforward. +% +% Written by Robert Kopocinski, Wroclaw University of Technology, +% for Master Thesis: "Simulating dependent random variables using copulas. +% Applications to Finance and Insurance". +% Date: 2007/05/12 +% +% Reference: +% [1] Durrleman, V. and Nikeghbali, A. and Roncalli, T. (2000) Copulas approximation and +% new families, Groupe de Recherche Operationnelle Credit Lyonnais + +[m n] = size(x); + +y = sort(x); + +for i=1:m + for j=1:m + ecop(i,j) = sum( (x(:,1)<=y(i,1)).*(x(:,2)<=y(j,2)) )/m; + + end +end diff --git a/efk_example.m b/efk_example.m new file mode 100644 index 0000000..03a385d --- /dev/null +++ b/efk_example.m @@ -0,0 +1,117 @@ +% Extended Kalman Filter Demo + +%{ + +Version 1.0, September 2006 + +This tutorial was written by Jose Manuel Rodriguez, +Universidad Politecnica de Cataluña, Spain +Based on the Technical Document by +Juan Andrade-Cetto, "The Kalman Filter", March 2002 +%} + +%{ +x is the real plant behavior, in this case a sinus wave +with the following formulation: + +x(time)=sin(frec(@ time-1)*time-1) + ramdom error signal("sigmav") +frec(time)=frec(@ time-1) + +x_ is the predicted state, this is where Kalman filter will come +and where we will correct our estimations using an observation + +z is the observation of the real plant, in this case corresponding only to +the position at a given time. Note that this observation is subject to an +error, therefore the resulting equation is: z(time)=x(time)+ramdom error("sigmaw") + +Our first prediction will come from the plant ideal behavior, then +using the observations and error covariance we will obtain a better estimate. + +xc is the ideal plant behavior... this is used just for comparison + +P is the state error covariances at a given time of all the involved variables, +note that we are forming x as a 2 by 1 matrix with the following: +x(1,n) -> position +x(2,n) -> frecuency + +Our functions are as following for this example: +Let's say: +f1: x1(time)=sin(x2(time-1)*(time-1))+V V->ramdom plant error +f2: x2(time)=x2(time-1) +h: y=x1+w w->ramdom sensor error + +F is the Jacobian of the transfer function due to the involved variables, +in this case these are x1 and x2, therefore F will be a 2 by 2 matrix +(always the matrix is square). The resulting F depends on time and must be +computed for every step that the system takes. + +F is as follows: +F -> df1/dx1 = 0 df1/dx2 = cos(x2*time)*time + df2/dx1 = 0 df2/dx1 = 1 + +%} + +clear all; close all; + + +% Initial Conditions +x(:,1) = [0;0.05]; %Our real plant initial condition +x_(:,1) = [0;0.04]; %Our estimate initial conidition (they might differ) +xc = x_; %Set the ideal model as we think it should start +P = [0.01 0; %set initial error covariance for position & frec, both at sigma 0.1, P=diag([sigma_pos_init^2 sigmav_frec_init^2]) + 0 0.01]; +sigmav = 0.1; %the covariance coeficient for the position error, sigma +sigmaw = 0.5; %the covariance coeficient for the frecuency error, sigma +Q = sigmav*sigmav; %the error covariance constant to be used, in this case just a escalar unit +R = sigmaw*sigmaw; %the error covariance constant to be used, in this case just a escalar unit + +G = [1;0]; %G is the Jacobian of the plant tranfer functions due to the error. +H = [ 1 0]; %H is the Jacobian of the sensor transfer functions due to the variables involved +W = 1; %W is the Jacobian of the sensor transfer functions due to the error. 
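% The loop below follows the textbook extended Kalman filter recursion;
% written out once with the symbols defined above (W = 1 in this example):
%
%   prediction:  x_ = f(x_)                 % nonlinear state propagation
%                P  = F*P*F' + G*Q*G'       % predicted state covariance
%   innovation:  S  = H*P*H' + W*R*W'       % innovation covariance
%                K  = P*H'/S                % Kalman gain
%   update:      x_ = x_ + K*(z - h(x_))    % corrected state estimate
%                P  = (eye(2) - K*H)*P      % corrected covariance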
+ +steps = 1000; %Amount of steps to simulate + +% bucle +for i =2:steps %start @ time=2 + % the real plant + x(:,i) = [sin(x(2,i-1)*(i-1)) + randn*sigmav ; x(2,i-1) ]; + z(i) = x(1,i) + randn*sigmaw; + + % blind prediction (just for comparison) + xc(:,i) = [sin(xc(2,i-1)*(i-1)); xc(2,i-1)]; + % prediction + x_(:,i) = [sin(x_(2,i-1)*(i-1)); x_(2,i-1)]; + z_(i) = x_(1,i); + + % compute F + F = [0 i*cos(x_(2,i)*i); + 0 1]; + + % Prediction of the plant covariance + P = F*P*F' + G*Q*G'; + % Innovation Covariance + S = H*P*H'+R; + % Kalman's gain + K = P*H'*inv(S); + % State check up and update + x_(:,i) = x_(:,i) + K * (z(i)-z_(i)); + + % Covariance check up and update + P = (eye(2)-K*H)*P; + + sigmaP(:,i)=sqrt(diag(P)); %sigmap is for storing the current error covariance for ploting pourposes +end + +figure(1);clf; hold on; +plot(x(1,:),'-b'); %plot the real plant behavior +plot(z,'.r'); %plot the observations over this plant +plot(x_(1,:),'-g'); %plot the Kalman filter prediction over the plant +plot(xc(1,:),'-m'); %The original thought of the plant +plot(x_(1,:)+2*sigmaP(1,:),'-g'); %These two are the threshold in witch I'm certain that the plant state is at a given time +plot(x_(1,:)-2*sigmaP(1,:),'-g'); + + +figure(2);clf;hold on; +plot(x(2,:),'-b'); %Frecuency estimation +plot(x_(2,:),'-g'); %Frecuency filtered by Kalman + diff --git a/emp_cdf.m b/emp_cdf.m new file mode 100644 index 0000000..07e11ad --- /dev/null +++ b/emp_cdf.m @@ -0,0 +1,41 @@ +function [cn, sn] = emp_cdf(a, b, pltflg); +% Estimate the EMPIRICAL cdf from the fourier coefficients a and b. Note +% that there are more accurate approaches than the simple brut-force +% method. +%-------------------------------------------------------------------------- + +if nargin < 3 + pltflg = false; +end + +[n, p] = size(a); + +% Estimate the power spectral density +c = a.^2 + b.^2; +% c = sqrt(c); +% c = c./(f'*ones(1, p)); +cn = c./(ones(n, 1)*sum(c)); +sn = cumsum(cn); + + +f_ny = 6; +f = (1:n)/n*f_ny; +if pltflg == true + figure + subplot(1,2,1) + plot(f, cn, 'linewidth', 1.5) + xlabel('[cycles/year]') + ylabel('[ ]') + title('Normalized PSD') + axis square + + subplot(1,2,2) + plot(f, sn) + xlabel('[cycles/year]', 'linewidth', 1.5) + ylabel('[ ]') + title('Normalized CDF') + axis square +end + + + diff --git a/emp_ks_tst.m b/emp_ks_tst.m new file mode 100644 index 0000000..12ddaeb --- /dev/null +++ b/emp_ks_tst.m @@ -0,0 +1,85 @@ +function Q_ks = emp_ks_tst(Sn, Pn, alpha) + + + +[N, C] = size(Sn); + +if nargin < 3 + alpha = 0.05; +end + +if N < 41 + load kstable.txt + if alpha == 0.1 + d_alpha = kstable(N, 2); + elseif alpha == 0.05 + d_alpha = kstable(N, 3); + elseif alpha == 0.025 + d_alpha = kstable(N, 4); + elseif alpha == 0.01 + d_alpha = kstable(N, 5); + elseif alpha == 0.005 + d_alpha = kstable(N, 6); + else + error('Alpha-value unknown... ') + end +else + if alpha == 0.1 + d_alpha = 1.07/sqrt(N); + elseif alpha == 0.05 + d_alpha = 1.22/sqrt(N); + elseif alpha == 0.025 + d_alpha = 1.36/sqrt(N); + elseif alpha == 0.01 + d_alpha = 1.52/sqrt(N); + elseif alpha == 0.005 + d_alpha = 1.63/sqrt(N); + else + error('Alpha-value unknown... 
') + end +end + + +% If no distribution is provided, the test assumes the cdf for white noise +if nargin < 2 + Pn = (0:1:N-1)'/(N); +end + + + +deltaCDF = abs(Sn - Pn*ones(1,C)); +KSstatistic = max(deltaCDF); + + +% doi_max = max(Sn - Pn*ones(1, C)); +% doi_min = min(Sn - Pn*ones(1, C)); +% +% doi = max(abs(Sn - Pn*ones(1, C))); +% +% Q_ks = zeros(3, C); +% Q_ks(1, doi < d_alpha) = 0; +% Q_ks(1, doi >= d_alpha) = 1; +% Q_ks(2, doi_max > abs(doi_min)) = 1; +% Q_ks(2, doi_max < abs(doi_min)) = -1; +% Q_ks(2, doi_max == abs(doi_min)) = 0; +% Q_ks(3, :) = doi; + +lambda = max((sqrt(N^2/(2*N)) + 0.12 + 0.11/sqrt(N^2/(2*N))).* KSstatistic , 0); +pValue = exp(-2.*lambda.*lambda); + +% Q_ks = zeros(3, C); +% Q_ks(1, alpha >= pValue) = + +Q_ks = alpha >= pValue; + + +fprintf('Testing for white noise... \n'); +fprintf('Test value: %g \n', d_alpha(1)) + + + + + + + + diff --git a/empcopula.m b/empcopula.m new file mode 100644 index 0000000..8032856 --- /dev/null +++ b/empcopula.m @@ -0,0 +1,28 @@ +function C = empcopula(r, s, disc) + +if nargin < 3, disc = 100; end + +n = 1/disc; +u = 0+n/2:n:1-n/2; +v = u; + + + + +% [Xs, r] = sort(X, 'ascend'); +% [Ys, s] = sort(Y, 'ascend'); + +n = length(r); + +% n = length(X); + +% for i = 1:length(u) +% for j = 1:length(v) +% C(i, j) = sum((r/(n+1)<=u(i)).*(s/(n+1)<=v(j)))/n; +% end +% end +for i = 1:length(u) + for j = 1:length(v) + C(i, j) = sum((r<=u(i)).*(s<=v(j)))/n; + end +end \ No newline at end of file diff --git a/emprand.m b/emprand.m new file mode 100644 index 0000000..394a740 --- /dev/null +++ b/emprand.m @@ -0,0 +1,105 @@ +function xr = emprand(x,dist,varargin) +%EMPRAND Generates random numbers from empirical distribution of data. +% This is useful when you do not know the distribution type (i.e. normal or +% uniform), but you have the data and you want to generate random +% numbers form the data. The idea is to first construct cumulative distribution +% function (cdf) from the given data. Then generate uniform random number and +% interpolate from cdf. +% +% USAGE: +% xr = EMPRAND(dist) - one random number +% xr = EMPRAND(dist,m) - m-by-m random numbers +% xr = EMPRAND(dist,m,n) - m-by-n random numbers +% +% INPUT: +% dist - vector of distribution i.e. data values +% m - generates m-by-m matrix of random numbers +% n - generates m-by-n matrix of random numbers +% +% OUTPUT: +% xr - generated random numbers +% +% EXAMPLES: +% % Generate 1000 normal random numbers +% mu = 0; sigma = 1; nr = 1000; +% givenDist = mu + sigma * randn(nr,1); +% generatedDist = emprand(givenDist,nr,1); +% % +% % % Plot histogram to check given and generated distribution +% [n,xout] = hist(givenDist); +% hist(givenDist); +% hold on +% hist(generatedDist,xout) +% % +% Plot cdf to check given and generated distribution +% figure +% x = sort(givenDist(:)); % Given distribution +% p = 1:length(x); +% p = p./length(x); +% plot(x,p,'color','r'); +% hold on +% +% xr = sort(generatedDist(:)); % Generated distribution +% pr = 1:length(xr); +% pr = pr./length(xr); +% +% plot(xr,pr,'color','b'); +% xlabel('x') +% ylabel('cdf') +% legend('Given Dist.','Generated Dist.') +% title('1000 random numbers generated from given normal distribution of data'); +% +% HISTORY: +% version 1.0.0, Release 05-Jul-2005: Initial release +% version 1.1.0, Release 16-Oct-2007: Some bug fixes and improvement of help text +% 1. Can handle NaN values in dist +% 2. Extraplolate for out of range +% 3. 
Calling function EMPCDF is included within this function +% +% See also: + +% Author: Durga Lal Shrestha +% UNESCO-IHE Institute for Water Education, Delft, The Netherlands +% eMail: durgals@hotmail.com +% Website: http://www.hi.ihe.nl/durgalal/index.htm +% Copyright 2004-2007 Durga Lal Shrestha. +% $First created: 05-Jul-2005 +% $Revision: 1.1.0 $ $Date: 16-Oct-2007 21:47:47 $ + +% *********************************************************************** +%% INPUT ARGUMENTS CHECK +% +% error(nargchk(1,3,nargin)); +if ~isvector(dist) + error('Invalid data size: input data must be vector') +end +if nargin == 3 + m = varargin{1}; + n = m; +elseif nargin == 4 + m = varargin{1}; + n = varargin{2}; +else + m = 1; + n = 1; +end + +%% COMPUTATION +% % x = dist(:); +% % Remove missing observations indicated by NaN's. +% t = ~isnan(x); +% x = x(t); + +% Compute empirical cumulative distribution function (cdf) +% xlen = length(x); +% x = sort(x); +% p = 1:xlen; +% p = p./xlen; +p = dist; + +% Generate uniform random number between 0 and 1 +ur = rand(m,n); +% keyboard +% Interpolate ur from empirical cdf and extraplolate for out of range +% values. +xr = interp1(x,p,ur,'linear','extrap'); diff --git a/ens_bounds.m b/ens_bounds.m new file mode 100644 index 0000000..73ab241 --- /dev/null +++ b/ens_bounds.m @@ -0,0 +1,12 @@ +function [mins, maxs] = ens_bounds(varargin) + + +nts = size(varargin{1}, 1); + +for i = 1:nts + for j = 1:length(varargin) + tmp(j, :) = varargin{j}(i, :); + end + mins(i, :) = min(tmp); + maxs(i, :) = max(tmp); +end diff --git a/eof.m b/eof.m new file mode 100644 index 0000000..158ba30 --- /dev/null +++ b/eof.m @@ -0,0 +1,158 @@ +function [eofs, pcs, lams, recon] = eof_ana(inpt, varargin); + + + +pp = inputParser; +pp.addRequired('inpt', @(x) (iscell(x) | isnumeric(x))); + +pp.addParamValue('weightflg', true, @islogical); +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('dlambda', 0.5, @isnumeric); +pp.addParamValue('clms', [4 5 9], @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('mask', 0, @isnumeric); +pp.addParamValue('mxmde', 0, @isint); +pp.addParamValue('dorecon', true, @islogical); +pp.addParamValue('addmn', true, @islogical); +pp.addParamValue('containsmiss', false, @islogical); + +pp.parse(inpt, varargin{:}); + +weightflg = pp.Results.theta; +theta = pp.Results.theta; +dlambda = pp.Results.dlambda; +clms = pp.Results.clms; +miss = pp.Results.miss; +mask = pp.Results.mask; +mxmde = pp.Results.mxmde; +dorecon = pp.Results.dorecon; +addmn = pp.Results.addmn; +containsmiss = pp.Results.containsmiss; +[rws, cls] = size(inpt{1, clms(3)}); + +% Compute weight factors (cos(theta)). 
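+% As an illustration, for the default 0.5-degree grid this amounts to
+% scaling every row of a field by the cosine of its latitude, e.g.
+%
+%   theta   = (89.75:-0.5:-89.75)';      % default latitudes (360 x 1)
+%   w       = cos(theta*pi/180);         % one weight per latitude band
+%   weights = w*ones(1, 720);            % expanded to a 360 x 720 grid
+%
+% so that the shrinking grid cells towards the poles get less weight.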
+if weightflg == true + if size(theta, 2) > size(theta, 1) + theta = theta'; + end + weights = cos(theta*pi/180)*ones(1, cls); +else + weights = ones(rws, cls); +end + + + +% Domain selection with a binary mask +if size(mask) == [1 1] + if containsmiss == true + + mask = ones(360, 720); + mask(inpt{1, clms(3)} == miss) = 0; + mask_vec = mask(:); + c_indx = find(mask_vec == 1); + + for i = 1:size(inpt, 1) + tmp = inpt{i, clms(3)}.*weights; + F(i,:) = tmp(mask == 1)'; + end + + else + for i = 1:size(inpt, 1) + tmp = inpt{i, clms(3)}.*weights; + F(i,:) = tmp(:)'; + end + c_indx = ones(rws*cls,1); + end +elseif size(mask) == [rws, cls] + + if containsmiss == true + mask(inpt{1, clms(3)} == miss) = 0; + end + mask_vec = mask(:); + c_indx = find(mask_vec == 1); + + for i = 1:size(inpt, 1) + tmp = inpt{i, clms(3)}.*weights; + F(i,:) = tmp(mask == 1)'; + end +end + +[n, p] = size(F); + + +% Removing the mean from the data +mn_F = mean(F,1); +F_prime = F - ones(n, 1)*mn_F; + +% Eigenvectors and eigenvalues of the covariance matrix R = F'*F +if mxmde == 0 | mxmde == p + [U, P, eofs] = svd(F); +else + [U, P, eofs] = svds(F, mxmde); +end + +% Compute the eigenvalues and the fraction of the variance explained +lams(:,1) = diag(P).^2; +lams(:,2) = lams*100/sum(lams); + +% Compute the principal components +pcs = F_prime*eofs; +pcs = [cell2mat(inpt(:, clms(1))) cell2mat(inpt(:, clms(2))) pcs]; + +% Compute the map for the eofs and reconstruct the data +if dorecon == true + + for i = 1:size(eofs,2) + tmp = zeros(rws*cls, 1)*NaN; + tmp(c_indx,1) = eofs(:,i); + + recon.eofs{i,1} = reshape(tmp, rws, cls); + end + + + F_recon = pcs(:, 3:end)*eofs'; + + mn_fld = zeros(rws*cls, 1)*NaN; + if addmn == true + mn_fld(c_indx,1) = mn_F(:); + end + + for i = 1:size(F_recon, 1) + tmp = zeros(rws*cls, 1)*NaN; + tmp(c_indx, 1) = F_recon(i,:)'; + + recon.F{i,1} = inpt{i, clms(1)}; + recon.F{i,2} = inpt{i, clms(2)}; + recon.F{i,3} = reshape(tmp + mn_fld, rws, cls); + end +else + recon = 0; +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/eof_ana.m b/eof_ana.m new file mode 100644 index 0000000..a26ade3 --- /dev/null +++ b/eof_ana.m @@ -0,0 +1,540 @@ +function [eofs, pcs, lams, varargout] = eof_ana(inpt, varargin); +% The function computes the Empirical orthagonal functions, principal +% components and the eigenvalues for a set of input fields. +%-------------------------------------------------------------------------- +% Input (mandatory): +% - inpt {m x 1} Cell array which contains the input fields. +% +% Input (optional): +% - theta [i x 1] Vector containing the latitudes of the elements of +% the input fields. Theta is only needed if areawghts +% is set to true +% Default: (89.75:-0.5:-89.75)' +% +% - miss [1 x 1] Scalar (or NaN) representing missing values in the +% input fields. The parameter is only needed if +% cntsmiss is set to true. +% Default: -9999 +% +% - mask [i x j] Optional binary map which can be applied to perform +% a domain selection. The map must have the same +% dimensions as the input fields and its elements must +% be either 0 or 1. +% +% - mxmde [1 x 1] Highest mode of the singular value decomposition. +% Set to 0 if the highest mode should be determined +% from the data. +% Default: 0 +% +% - rec_eofs (logical) If set to true, the function will compute maps of +% the eigenvectors (the EOFs) and reconstruct the data +% from the PCs and the EOFs. +% Default: true +% +% - addmn (logical) By default, the input data is centralized before the +% computation of the EOFs. 
To better compare the +% reconstructed data with the input fields, the mean +% can be added back to the maps. +% Default: true +% +% - remmn (logical) Prior to the computation of the EOFs, the temporal +% mean should be removed from the input data. In this +% case, the matrix product R = F'*F represents the +% covariance matrix of the input data. +% Default: true +% +% - cntsmiss (logical) In some cases, the input dataset contains missing +% values. The function removes these values from the +% analysis. By default, it is assumed that missing +% elements contain -9999. In any other case, the +% parameter miss must be set to the appropriate value. +% Default: false +% +% - areawght (logical) In most cases, latitude-dependent area weights (the +% square root of the cosine of the latitude) are +% applied to account for meridional convergence, i.e. +% the area of the grid cells decreases towars high +% latitudes. If set to true, the parameter theta must +% agree with the latitudes of the input fields. +% Default: true +% +% - decompdim (string) The dimension in which the decomposition shall be +% performed. By default, the function does a temporal +% decomposition. +% Default: temp +% +% - pltflg (logical) If pltflg is set to true, some plots are created +% during the EOF-analysis +% +% - nrmflgeof (logical)If normflg is set to true, the EOFs are normalized +% so that the length of each vector is 1. +% Default: true +% +% - nrmflgdta (logical)If nrmflgdta is set to true, the data is normalized +% before the EOF decomosition. In this case, the EOFs +% do not consider the amplitude of the signal but only +% the variability. This helps if the EOFS of different +% quantities (e.g. precipitation and temperature) +% should be compared. +% Default: false +% quantitiers +% +% Output: +% - eofs Matrix which contains the normalized eigenvectors +% (i.e. the EOFs of the covariance matrix) where the +% kth column contains the EOF of the kth mode. +% +% - pcs Principal components of the EOFs which account for +% their temporal variability. The matrix contains +% time-series for each principal component. +% +% - lams Eigenvalues of the covariance matrix R = F'*F. The +% first column contains the absolute values. The +% second column contains the percentage of the total +% explained variance (i.e. the sum of all eigenvalues) +% while the elements of the third column are the +% cumulative squared covariance fraction. +% +% - recon Structure parameter which contains maps of the +% eigenvectors and the reconstructed input data from +% the EOFs and the PCs. 
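+%
+% Example (illustrative only): for a cell array flds whose columns 3 and 4
+% hold month and year and whose column 8 holds monthly 360 x 720 fields
+% (the default 'clms' of [3 4 8]), an analysis truncated to the first ten
+% modes could look like
+%
+%   [eofs, pcs, lams, recon] = eof_ana(flds, 'mxmde', 10, ...
+%                                      'cntsmiss', true, 'miss', -9999);
+%   lams(1:10, 3)   % explained variance [%] of the first ten modes
+%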
+%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: January 201 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- +fprintf('------------------------------------------------------------ \n') +fprintf(' EOF - Analysis \n') +fprintf('------------------------------------------------------------ \n') + + +% ------------------------------------------------------------------------- +% Input checking and setting defalut values +% ------------------------------------------------------------------------- + +pp = inputParser; +pp.addRequired('inpt', @(x) (iscell(x) | isnumeric(x))); + +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('mask', [], @isnumeric); +pp.addParamValue('mxmde', 0, @isint); + +pp.addParamValue('dorecon', true, @islogical); +pp.addParamValue('addmn', true, @islogical); +pp.addParamValue('remmn', true, @islogical); +pp.addParamValue('cntsmiss', false, @islogical); +pp.addParamValue('areawght', true, @islogical); +pp.addParamValue('decompdim', 'temp', @ischar); +pp.addParamValue('nrmflgeof', true, @islogical); +pp.addParamValue('normstd', false, @islogical); +pp.addParamValue('pltflg', false, @islogical); +pp.addParamValue('rotflg', false, @islogical); +pp.addParamValue('maxit', 1000, @isint); +pp.addParamValue('normrot', true, @islogical); +pp.addParamValue('clms', [3 4 8], @isnumeric); +pp.addParamValue('recon_eofs', true, @islogical); +pp.addParamValue('recon_data', true, @islogical); +pp.addParamValue('recon_mode', 0, @isnumeric); +pp.addParamValue('comp_errs', true, @islogical); +pp.addParamValue('corrmps', false, @islogical); +pp.addParamValue('eeof', false, @islogical); +pp.addParamValue('eeof_lags', 0, @isnumeric); +pp.addParamValue('eeof_dt', 0, @isnumeric); + +pp.parse(inpt, varargin{:}); + +areawght = pp.Results.areawght; +theta = pp.Results.theta; +miss = pp.Results.miss; +mask = pp.Results.mask; +mxmde = pp.Results.mxmde; +dorecon = pp.Results.dorecon; +addmn = pp.Results.addmn; +remmn = pp.Results.remmn; +cntsmiss = pp.Results.cntsmiss; +decompdim = pp.Results.decompdim; +nrmflgeof = pp.Results.nrmflgeof; +rotflg = pp.Results.rotflg; +maxit = pp.Results.maxit; +normrot = pp.Results.normrot; +normstd = pp.Results.normstd; +pltflg = pp.Results.pltflg; +clms = pp.Results.clms; +recon_eofs = pp.Results.recon_eofs; +recon_data = pp.Results.recon_data; +recon_mode = pp.Results.recon_mode; +comp_errs = pp.Results.comp_errs; +corrmps = pp.Results.corrmps; +eeof = pp.Results.eeof; +eeof_lags = pp.Results.eeof_lags; +eeof_dt = pp.Results.eeof_dt; + +clear pp + + +% Get the time information and the cells which contain the data +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); +inpt = inpt(:, clms(3)); + +% Compute the size of the input fields and the number of available samples +% (i.e. the number of time-steps). It is assumed that the number of +% time-steps corresponds to the number of availab612le fields in the input +% dataset. +[rws, cls] = size(inpt{1}); +nts = length(inpt); + + +% ------------------------------------------------------------------------- +% Computation of area weights +% ------------------------------------------------------------------------- +% Compute weight factors. In some publications the weight factors are +% simply the cosines of the latitute. 
But it is more convenient to use the +% square root of the cosines as this will ensure the correct weighting of +% the covariance matrix, i.e C_w = cos(lat)*F'*F; +if areawght == true + fprintf('EOF-ana -> Computing the weight factors! \n') + if size(theta, 2) > size(theta, 1) + theta = theta'; + end + weights = sqrt(cos(theta*pi/180))*ones(1, cls); +else + weights = ones(rws, cls); +end + + +% ------------------------------------------------------------------------- +% Domain selection and array re-ordering +% ------------------------------------------------------------------------- +% Domain selection with a binary mask. Note that the computation of EOFs +% has a strong domain dependency, i.e the results might differ +% significantly based on the selected domain. +% If no mask is applied, the function generates a mask to account for +% missing elements in the input data, i.e. grid points where no values +% are available +if isempty(mask) + fprintf('EOF-ana -> No mask is applied! \n') + mask = ones(rws, cls); +elseif size(mask) == [rws, cls] + fprintf('EOF-ana -> Applying a binary mask! \n') +else + warning('Mask does not have the correct input format! \n') + warning('Skipping the mask! \n') + mask = ones(rws, cls); +end + +if cntsmiss == true + fprintf('EOF-ana -> Data contains missing values! \n') + % If the input data contains missing values, these have to be removed + % before performing the EOF decomposition + for i = 1:nts + mask(inpt{i} == miss) = 0; + end +end + +% Now we have a mask which contains only those elements with which the EOF +% decomposition can be performed +fprintf('EOF-ana -> Rearange the cell-array in a matrix \n') +[F, c_indx] = cell2catchmat(inpt, mask); + +if areawght == true + fprintf('EOF-ana -> Apply area weights! \n') + weights = weights(:); + weights = weights(c_indx); +% F = F*diag(weights); + F = F.*(ones(nts,1)*weights'); + % We do not need the weights any more... + clear weights +end + +if normstd == true + fprintf('EOF-ana -> Normalize the input matrix by the stds. \n') + norm_std = std(F); + F = F*diag(1./norm_std); +end + + + +% % For a spatial decompositon, we transpose the matrix F. +% if strcmp(decompdim, 'spat') +% fprintf('EOF-ana -> Spatial decompositon! \n') +% F = F'; +% end +% CAN BE COMPUTED LATER!!!!!!!! + + + +% ------------------------------------------------------------------------- +% Centralize the input dataset +% ------------------------------------------------------------------------- +% Removing the sample mean from the data. This ensures that R = F'*F can be +% interpreted as the covariance matrix (which is not done explicitly in +% this function as the SVD-approach is used to compute the EOFs). However, +% the EOFs can be also computed without removing the mean from the data +% (set remmn to false) but the results are more difficult to interpret. +if remmn == true + fprintf('EOF-ana -> Removing the mean... \n') + mn_F = mean(F,1); + F = F - ones(nts, 1)*mn_F; +end + + + + +if eeof == true + fprintf('EOF-ana -> Building lagged data matrix! 
\n') + npts = size(F, 2); + nr_lags = length(eeof_lags); + F_old = F; + F = zeros(nts - eeof_lags(end), nr_lags*npts); + + for i = 1:length(eeof_lags) + indx = (eeof_lags(i)+1:nts-eeof_lags(end)+eeof_lags(i))'; + F(:, (i-1)*npts + 1 : i*npts) = F_old(indx, :); + end +end +% keyboard +% ------------------------------------------------------------------------- +% Compute EOFs, PCs and eigenvalues through SVD +% ------------------------------------------------------------------------- +% Compute left and right singular vecors and the singular values of the +% input data F througth F = U*P*V' +% The right singular vectors V contain the eigenvectors (EOFs) of the +% covariance matrix R = F'*F while the (diagonal) matrix P contains the +% square roots of the eigenvalues of R. +if mxmde == 0 | mxmde == nts + mxmde = nts; + [U, P, eofs] = svd(F, 'econ'); +elseif mxmde == rws*cls + [U, P, eofs] = svd(F, 'econ'); +else + [U, P, eofs] = svds(F, mxmde); +end + +% Compute the eigenvalues and the fraction of the variance explained +% For the eigenvalues, we have to divide through the sample size (which is +% the number of time-steps). This is due to the fact that the squared +% elements of P are the eigenvalues of the covariance matrix R = F'*F, but +% it is more reasonable to divide the covariance throught the number of +% samples, i.e. R = (1/(N-1))*F'*F. +% The function further computes the squared covariance explained (SCF) and +% the cumulative squared covariance fraction (CSCF) of the covariance +% matrix R. +lams(:,1) = (1:size(P, 1))'; +lams(:,2) = (diag(P).^2)/(size(P,1)-1); % Eigenvalues +lams(:,3) = lams(:,2)./sum(lams(:,2))*100; % SCF +lams(:,4) = cumsum(lams(:,3))./sum(lams(:,3))*100; % CSCF + +if pltflg == true + eigplot(P, lams); +end + + + + +% The EOFs are normalized such that the sum of squares for each EOF pattern +% equals one. To denormalize the returned EOFs multiply by the square root +% of the associated eigenvalue. +if nrmflgeof == true + eofs = eofs./(ones(length(eofs),1)*sum(eofs.^2).^(1/2)); +end + +% Compute the principal components which are row vectors containing +% time-series for each mode. They are simply the projection of F onto the +% EOF of each mode. + +pcs = U*P; + +if eeof == true + if mxmde == nts + mxmde = size(pcs, 1); + end +% keyboard + nts = size(pcs, 1); + pcs = [0 0 0 (1:mxmde) ; + mnths(1:nts) yrs(1:nts) datenum(yrs(1:nts), mnths(1:nts), ones(nts,1)*15) pcs]; +else + pcs = [0 0 0 (1:mxmde) ; + mnths yrs datenum(yrs, mnths, ones(nts,1)*15) pcs]; +end + + +% Still under construction..... +% ------------------------------------------------------------------------- +% Optional: Rotation of EOFs +% ------------------------------------------------------------------------- +if rotflg == true + [pcs(2:end, 4:end), r] = varimax(pcs(2:end, 4:end)); + eofs = eofs*r; +end + + +% ------------------------------------------------------------------------- +% Reconstruction of the EOFs and the input data +% ------------------------------------------------------------------------- +% For the spatial representation of the EOFs, the function computes maps +% for each single mode and reconstructs the input data from the EOFs. +if recon_data == true + if recon_mode == 0 + recon_mode = mxmde; + end + % Each row of F_recon represents the reconstruction (i.e. the map) of + % the input data from the PCs and the EOFs at one time-step. Thus, the + % matrix has a total of [nts] rows. 
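+    % As an illustration, in the standard (non-EEOF) branch below the
+    % reconstruction with k = recon_mode retained modes is just the product
+    % of the first k PCs and the transposed first k EOFs,
+    %
+    %   F_recon = pcs(2:end, 4:k+3) * eofs(:, 1:k)';
+    %
+    % which reproduces the centered data exactly for k = nts and gives the
+    % best rank-k least-squares approximation for smaller k.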
+ fprintf('EOF-ana -> Reconstructing the input data from the truncated set of EOFS ') + if eeof == true + F_recon = recon_eeofs(eofs, pcs(2:end, 4:recon_mode+3), eeof_lags, recon_mode); +% pcs_md = [zeros(eeof_lags(end), recon_mode); pcs(2:end, 4:recon_mode+3); zeros(eeof_lags(end), recon_mode)]; +% eofs_md = zeros(nr_lags*recon_mode, npts); +% bigmat = zeros(nts + eeof_lags(end), nr_lags*recon_mode); +% +% for i = 1:nr_lags +% bigmat(:, (i-1)*recon_mode+1:i*recon_mode) = ... +% pcs_md(eeof_lags(end)+1-eeof_lags(i):size(pcs_md, 1)-eeof_lags(i), 1:recon_mode); +% eofs_md((i-1)*recon_mode+1:i*recon_mode, :) = eofs((i-1)*npts+1:i*npts, 1:recon_mode)'; +% end +% div = zeros(size(bigmat)); +% div(bigmat ~= 0) = 1; +% div = 1./(sum(div, 2)/recon_mode); +% F_recon = repmat(div, 1, npts).*(bigmat*eofs_md); + else + F_recon = pcs(2:end, 4:recon_mode+3)*eofs(:, 1:recon_mode)'; + end + + + + + % To better compare the reconstruced data with the input data, the mean + % must be added back to the maps. This step also ensures that unwanted + % elements in the input dataset, i.e. where the binary map contains + % zeros, are set to NaN in the reconstructed maps. + if addmn == true + F_recon = F_recon + ones(size(F_recon, 1), 1)*mn_F; + end + + % The reconstruction of the input data is performed by reshaping each + % row of the F_recon matrix to a matrix with [rws, cls] dimensions. + + F_recon_map = catchmat2cell(F_recon, c_indx, rws, cls, NaN); + F_recon_map = [num2cell(mnths), num2cell(yrs), F_recon_map]; + + fprintf(' Done! \n') + varargout{1} = F_recon_map; + + if comp_errs == true + if eeof == true + F_errs = F_old - F_recon; + else + F_errs = F - F_recon; + end + F_errs_map = catchmat2cell(F_errs, c_indx, rws, cls, NaN); + F_errs_map = [num2cell(mnths), num2cell(yrs), F_errs_map]; + varargout{2} = F_errs_map; + end + +end + +if recon_eofs == true + % The reconstruction of the EOFs is performed by reshaping each + % row of the eof matrix to a matrix with [rws, cls] dimensions. + fprintf('EOF-ana -> Remapping of the EOFS to the size of the input') + if eeof == true + eofs_mat = eofs; + clear eofs + for i = 1:nr_lags + tmp = catchmat2cell(eofs_mat((i-1)*npts + 1 : i*npts, :)', c_indx, rws, cls, NaN); + eofs(:, i) = tmp; + clear tmp + end + else + eofs = catchmat2cell(eofs', c_indx, rws, cls, NaN); + end + fprintf(' Done! 
\n') +end + +if corrmps == true + % Computation of correlation maps between the full input data and the + % individual PCs + if eeof == true + corrs = corr(F_old(1:end-max(eeof_lags), :), pcs(2:end, 4:end)); + else + corrs = corr(F, pcs(2:end, 4:end)); + end + corrs_map = catchmat2cell(corrs', c_indx, rws, cls, NaN); + varargout{3} = corrs_map; +end + + + + + + + + + + + + + + + + + + + + + + +function [] = eigplot(P, lams); + mde = 0:size(lams, 1) - 1; + + scrsz = get(0,'ScreenSize'); + figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/2 scrsz(4)/2]) + + subplot(1,4,1) + plot(mde, diag(P), 'x', 'linewidth', 1.5); + title('Singular values') + xlabel('Mode') + axis square + + subplot(1,4,2) + plot(mde, lams(:,2), 'x', 'linewidth', 1.5); + title('Eigenvalues') + xlabel('Mode') + axis square + + subplot(1,4,3) + plot(mde, lams(:,3), 'x', 'linewidth', 1.5); + title('Squared covariance explained') + xlabel('Mode') + ylabel('%') + axis square + axis([0 max(mde) 0 100]) + + subplot(1,4,4) + plot(mde, lams(:,4), 'x', 'linewidth', 1.5); + title('Squared covariance fraction') + xlabel('Mode') + ylabel('%') + axis square + axis([0 max(mde) 0 100]) + + + + + + + + + + + + + + + + + + diff --git a/eof_new.m b/eof_new.m new file mode 100644 index 0000000..c1f20ee --- /dev/null +++ b/eof_new.m @@ -0,0 +1,73 @@ +function [L, EOFs, EC, error, norms] = eof_new( U, n, norm, varargin ) +% EOF - computes EOF of a matrix. +% +% Usage: [L, EOFs, EC, error, norms] = EOF( M, num, norm, ... ) +% +% M is the matrix on which to perform the EOF. num is the number of EOFs to +% return. If num='all', then all EOFs are returned. This is the default. +% +% If norm is true, then all time series are normalized by their standard +% deviation before EOFs are computed. Default is false. In this case, +% the fifth output argument will be the standard deviations of each column. +% +% ... are extra arguments to be given to the svds function. These will +% be ignored in the case that all EOFs are to be returned, in which case +% the svd function is used instead. Use these with care. +% +% Data is not detrended before handling. Use the detrend function to fix +% that. +% +% L are the eigenvalues of the covariance matrix ( ie. they are normalized +% by 1/(m-1), where m is the number of rows ). EC are the expansion +% coefficients (PCs in other terminology) and error is the reconstruction +% error (L2-norm). +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +% $Id: EOF.m,v 1.3 2003/06/01 22:20:23 dmk Exp $ +% +% Copyright (C) 2001 David M. Kaplan +% Licence: GPL (Gnu Public License) +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +if nargin < 2 + n = 'all'; +end + +if nargin < 3 + norm = 0==1; +end + +s = size(U); +ss = min(s); + +% Normalize by standard deviation if desired. +if norm + norms = std(U); +else + norms = ones([1,s(2)]); +end +% U = U * diag(1./norms); + +% Do SVD +if (ischar(n) & n == 'all') | n >= ss + % Use svd in case we want all EOFs - quicker. + [ C, lambda, EOFs ] = svd( full(U) ); +else + % Otherwise use svds. + [ C, lambda, EOFs, flag ] = svds( U, n, varargin{:} ); + + if flag % Case where things did not converge - probably an error. + warning( 'HFRC_utility - Eigenvalues did not seem to converge!!!' ); + end + +end + +% Compute EC's and L +EC = C * lambda; % Expansion coefficients. +L = diag( lambda ) .^ 2 / (s(1)-1); % eigenvalues. + +% Compute error. 
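+% Note that the quantity computed below is the column-wise L2 norm of the
+% residual U - EC*EOFs', i.e. one reconstruction error per time series.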
+diff=(U-EC*EOFs'); +error=sqrt( sum( diff .* conj(diff) ) ); \ No newline at end of file diff --git a/eof_simple.m b/eof_simple.m new file mode 100644 index 0000000..f36cbf4 --- /dev/null +++ b/eof_simple.m @@ -0,0 +1,29 @@ +function [eofs, pcs, lams] = eof_simple(F, mxmde); + + +[n, p] = size(F); + +if nargin < 2 + mxmde = p; +end + +% Removing the mean +F_prime = F - ones(n,1)*mean(F,1); + +% Computation of the (small) covariance matrix +L = F_prime*F_prime'; + +% Eigenvectors and eigenvalues for the covariance matrix +if mxmde == p + [U, P, V] = svd(F); +else + [U, P, V] = svds(F, mxmde); +end + +lams(:,1) = diag(P).^2; +lams(:,2) = lams/sum(lams); + +eofs = V; +pcs = F*eofs; + + diff --git a/errperf.m b/errperf.m new file mode 100644 index 0000000..8b5db34 --- /dev/null +++ b/errperf.m @@ -0,0 +1,258 @@ +function V=errperf(T,P,M) + +%ERRPERF Determine various error related performance metrics. +% +% ERRPERF(T,P,M) uses T and P, which are target and prediction vectors +% respectively, and returns the value for M, which is one of several error +% related performance metrics. +% +% T and P can be row or column vectors of the same size. M can be one of +% the following performance metrics: +% +% mae (mean absolute error) +% mse (mean squared error) +% rmse (root mean squared error) +% +% mare (mean absolute relative error) +% msre (mean squared relative error) +% rmsre (root mean squared relative error) +% +% mape (mean absolute percentage error) +% mspe (mean squared percentage error) +% rmspe (root mean squared percentage error) +% +% EXAMPLE: +% +% rand('state',0) +% +% T = [0:0.2:1] +% P = rand(size(T)).*T +% +% errperf(T,P,'mae') returns 0.1574 +% +% To compute the relevant performance metric, the function uses recursion +% to first compute one or more error vectors. The function can therefore +% secondarily be used to compute these error vectors. M can therefore also +% be one of the following: +% +% e (errors) +% ae (absolute errors) +% se (squared errors) +% +% re (relative errors) +% are (absolute relative errors) +% sre (squared relative errors) +% +% pe (percentage errors) +% ape (absolute percentage errors) +% spe (squared percentage errors) +% +% REMARKS: +% +% - The Neural Network Toolbox also has functions to compute mae and mse. +% This function does not make use of the toolbox. +% +% - Percentage error equals relative error times 100. +% +% - The abbreviations used in the code, and the calculation tree are +% documented in a comments section within the file. +% +% VERSION: 20070703 +% MATLAB VERSION: 7.4.0.287 (R2007a) +% LICENSE: As-is; public domain +% +% See also MAE, MSE. + +%{ +VERSION HISTORY: +20070703: - Added MATLAB version check. +20070606: - Added support for metrics MARE, MSRE, and RMSRE. + - Addressed a possible division by zero condition in calculating + relative and percentage errors. +20070528: - Original version. 
+ +KEYWORDS: +perf, performance, metric, performance measure, machine learning +%} + +%% Comments + +%{ + +Abbreviations: + +a: absolute +e: error(s) +M: METRIC +m: mean +P: PREDICTIONS +p: percentage +r: relative (if before e) +r: square root (if before m) +s: squared +T: TARGETS +V: VALUE(S) + +Calculation tree: + +e +| +|-ae-mae +| +|-se-mse-rmse +| +|-re-pe + | | + | |-ape-mape + | | + | |-spe-mspe-rmspe + | + |-are-mare + | + |-sre-msre-rmsre + +%} + +%% Check MATLAB version + +if datenum(version('-date')) throw away second half +fftx = fftx(1:NumUniquePts, :); + +% Obtain the a and b values seperately +a = (fftx + conj(fftx)) / (n); +b = 1i * (fftx - conj(fftx)) / (n); + +% As we threw away the redundant half of the spectrum, we have to multiply +% both a and b by 2 to maintain the same energy However, this is usually +% not done for the DC (and the Nyquist) component of the FFT. + +% Odd nfft -> a(1) = DC, no Nyquist component +a(1,:) = a(1,:) / 2; + +% Compute the amplitude spectrum +Y = abs(fftx)/n; +Y = [Y(1, :); 2*Y(2:end, :)]; + +% Compute the Periodogram +P = abs(fftx).^2/(Fs*n); +P = [P(1, :); 2*P(2:end, :)]; + +if rem(n,2) == 0, + % Even nfft -> a(1) = DC, a(end) = Nyquist + a(end,:) = a(end,:)/2; + Y(end, :) = 1/2*Y(end, :); + P(end, :) = 1/2*P(end, :); +end + + +% Compute the corresponding frequency +f = 0 : Fs/n : Fs/2; + +AB = [a b]; + + diff --git a/fill_ts.m b/fill_ts.m new file mode 100644 index 0000000..d7cc022 --- /dev/null +++ b/fill_ts.m @@ -0,0 +1,45 @@ +function TS_out = fill_ts(ts_in, ref_vec, tscale) + +if nargin < 3, tscale = 'monthly'; end +if nargin < 2 + + if strcmp(tscale, 'monthly') + ref_vec = dtevec([ts_in(1,1), ts_in(1,2)], ... + [ts_in(end,1) ts_in(end,2)], ... + 'monthly'); + + elseif strcmp(tscale, 'daily') + ref_vec = dtevec([ts_in(1,1) ts_in(1,2) ts_in(1,3)], ... + [ts_in(end,1) ts_in(end,2) ts_in(end,3)], ... + 'daily'); + + + end +end + +num_indx = size(ref_vec, 2); + + +ts_dte = ts_in(:,num_indx); +TS_out = zeros(length(ref_vec), size(ts_in, 2)); + +for i = 1:length(ref_vec) + if find(ts_dte == ref_vec(i,num_indx)); + indx(i,1) = find(ts_dte == ref_vec(i,num_indx)); + else + indx(i,1) = NaN; + end +end + +for i = 1:length(indx) + TS_out(i, 1:num_indx) = ref_vec(i, :); + if ~isnan(indx(i)) + TS_out(i, num_indx+1:end) = ts_in(indx(i), num_indx+1:end); + else + TS_out(i, num_indx+1:end) = zeros(1, size(ts_in, 2)-num_indx)*NaN; + end +end + + + + \ No newline at end of file diff --git a/fillts.m b/fillts.m new file mode 100644 index 0000000..693fdbf --- /dev/null +++ b/fillts.m @@ -0,0 +1,63 @@ +function tsb = fillts(inpt, clms); + +mnths = inpt(:, clms(1)); +yrs = inpt(:, clms(2)); +ts = inpt(:, clms(3)); + +% 1. 
Find missing months +mflg = mnths(2:end) - mnths(1:end-1); + +missing = find(mflg ~= 1 & mflg ~= -11); + +ts_n = []; + +for i = 1:length(missing) + if i == 1 + tmp = ts(1:missing(1)); % ts until the missing element + if mflg(missing(i)) > 0 % missing element during the year + rep = ones(mflg(missing(i))-1,1)*NaN; + elseif mflg(missing(i)) < 0 + rep = ones(11 + mflg(missing(i)),1)*NaN; + end + ts_n = [tmp; rep]; + + elseif i == length(missing) + tmp = ts(missing(i-1)+1:missing(i)); + if mflg(missing(i)) > 0 % missing element during the year + rep = ones(mflg(missing(i))-1,1)*NaN; + elseif mflg(missing(i)) < 0 + rep = ones(11 + mflg(missing(i)),1)*NaN; + end + ts_n = [ts_n; tmp; rep; ts(missing(i)+1:end)]; + + else + tmp = ts(missing(i-1)+1:missing(i)); + if mflg(missing(i)) > 0 % missing element during the year + rep = ones(mflg(missing(i))-1,1)*NaN; + elseif mflg(missing(i)) < 0 + rep = ones(11 + mflg(missing(i)),1)*NaN; + end + ts_n = [ts_n; tmp; rep]; + end +end + +mn_n = zeros(size(ts_n)); +yr_n = zeros(size(ts_n)); +snr = zeros(size(ts_n)); + +mn_n(1) = mnths(1); +yr_n(1) = yrs(1); +snr(1) = datenum(yrs(1), mnths(1), 15); + +for i = 2:length(ts_n) + mn_n(i) = mn_n(i-1) + 1; + yr_n(i) = yr_n(i-1); + + if mn_n(i) == 13 + mn_n(i) = 1; + yr_n(i) = yr_n(i) + 1; + end + snr(i) = datenum(yr_n(i), mn_n(i), 15); +end + +tsb = [mn_n yr_n snr ts_n]; diff --git a/find_sim_tspts.m b/find_sim_tspts.m new file mode 100644 index 0000000..eaaca11 --- /dev/null +++ b/find_sim_tspts.m @@ -0,0 +1,55 @@ +function [fld1_out, fld2_out] = find_sim_tspts(fld1, fld2, tref, dailydta); + + +if nargin < 4, dailydta = 0; end +if nargin < 3, tref = 1; end + +if tref == 1 + if dailydta == 1 + dtes_1 = fld1(2:end, 4); + dtes_2 = fld2(2:end, 4); + + sdte_1 = dtes_1(1); + edte_1 = dtes_1(end); + + sdte_2 = dtes_2(1); + edte_2 = dtes_2(end); + else + dtes_1 = fld1(2:end, 3); + dtes_2 = fld2(2:end, 3); + + sdte_1 = dtes_1(1); + edte_1 = dtes_1(end); + + sdte_2 = dtes_2(1); + edte_2 = dtes_2(end); + end +end + + +if sdte_1 <= sdte_2 + sindx_1 = find(dtes_1 == sdte_2); + sindx_2 = 1; +elseif sdte_2 < sdte_1 + sindx_1 = 1; + sindx_2 = find(dtes_2 == sdte_1); +end + +if isempty(sindx_1) | isempty(sindx_2) + error('Datasets do not cover the same time-period!') +end + +if edte_1 <= edte_2 + eindx_1 = length(dtes_1); + eindx_2 = find(dtes_2 == edte_1); +elseif edte_2 < edte_1 + eindx_1 = find(dtes_1 == edte_2); + eindx_2 = length(dtes_2); +end + +if isempty(eindx_1) | isempty(eindx_2) + error('Datasets do not cover the same time-period!') +end + +fld1_out = [fld1(1, :); fld1(sindx_1 + 1 : eindx_1 + 1, :)]; +fld2_out = [fld2(1, :); fld2(sindx_2 + 1 : eindx_2 + 1, :)]; diff --git a/findindx.m b/findindx.m new file mode 100644 index 0000000..839e5f1 --- /dev/null +++ b/findindx.m @@ -0,0 +1,11 @@ +function otpt = findindx(inpt, indx_vec) + + +otpt(:, 1:3) = inpt(:, 1:3); + +for i = 1:length(indx_vec) + indx = find(inpt(1, :) == indx_vec(i)); + otpt(:, i+3) = inpt(:, indx); +end + + \ No newline at end of file diff --git a/findtstps.m b/findtstps.m new file mode 100644 index 0000000..3a315b8 --- /dev/null +++ b/findtstps.m @@ -0,0 +1,126 @@ +function [otpt] = findtstps(fld, period, tscale, clms) +% The function selects the time-steps between a start and end year from a +% given dataset which can be either a matrix or a cell array. 
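+%
+% A typical call (illustrative; it assumes a cell array flds whose columns
+% 3 and 4 hold the month and year of each field) selects all monthly
+% fields between January 2000 and December 2005:
+%
+%   otpt = findtstps(flds, [1 2000 12 2005], 'monthly', [3 4]);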
+ +%-------------------------------------------------------------------------- +% Input: fld matrix/cell Input field which contains the data and +% time information +% period vector Period which will be selected by the +% function. The start and end date must +% be given in one of the following orders +% [2 x 1] -> start-year, end-year +% [4 x 1] -> start-month/year +% end-month/year +% [6 x 1] -> start-day/month/year +% end-day/month/year +% clms vector Columns which contain the time +% information +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: March 2012 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +tval = length(period); + +if nargin < 4 + if tval == 2 + if isnumeric(fld) + clms = 2; + elseif iscell(fld) + clms = 4; + end + elseif tval == 4 + if isnumeric(fld) + clms = [1 2]; + elseif iscell(fld) + clms = [3 4]; + end + end +end + +if nargin < 3 + tscale = 'monthly'; +end + + +if isnumeric(fld) + + if strcmp(tscale, 'monthly') + data_dtes = datenum(fld(clms(2)), fld(clms(1)), 15); + elseif strcmp(tscale, 'daily') + data_dtes = datenum(fld(clms(3)), fld(clms(2)), fld(clms(1))); + end + + if tval == 2 + dte_ref = dtevec([1 period(1)], [12 period(2)], tscale); + dte_indx = find(data_dtes == dte_ref(i, 3)); + elseif tval == 4 + dte_ref = dtevec(period(1:2), period(3:4), tscale); + elseif tval == 6 + dte_ref = dtevec(period(1:3), period(4:6), 'daily'); + end + + + + + + + +elseif iscell(fld) + + if tval == 2 + + yrs = cell2mat(fld(:, clms(1))); + sindx = find(yrs == period(1), 1, 'first'); + eindx = find(yrs == period(2), 1, 'last'); + + elseif tval == 4 + + mnths = cell2mat(fld(:, clms(1))); + yrs = cell2mat(fld(:, clms(2))); + + sindx = find(mnths == period(1) & yrs == period(2), 1, 'first'); + eindx = find(mnths == period(3) & yrs == period(4), 1, 'last'); + + elseif tval == 6 + + days = cell2mat(fld(:, clms(1))); + mnths = cell2mat(fld(:, clms(2))); + yrs = cell2mat(fld(:, clms(3))); + + sindx = find(days == period(1) & ... + mnths == period(2) & ... + yrs == period(3), 1, 'first'); + sindx = find(days == period(4) & ... + mnths == period(5) & ... + yrs == period(6), 1, 'last'); + end + + if isempty(sindx), error('Start date not included in the dataset'); end + if isempty(eindx), error('End date not included in the dataset'); end + + otpt = fld(sindx:eindx, :); +end + + + + + + + + + + + + + + + + + + + + diff --git a/findtstps_cell.m b/findtstps_cell.m new file mode 100644 index 0000000..62a1f4b --- /dev/null +++ b/findtstps_cell.m @@ -0,0 +1,102 @@ +function [otpt] = findtstps_cell(fld, period, clms) +% The function selects the time-steps between a start and end year from a +% given dataset which can be either a matrix or a cell array. + +%-------------------------------------------------------------------------- +% Input: fld matrix/cell Input field which contains the data and +% time information +% period vector Period which will be selected by the +% function. 
The start and end date must +% be given in one of the following orders +% [2 x 1] -> start-year, end-year +% [4 x 1] -> start-month/year +% end-month/year +% [6 x 1] -> start-day/month/year +% end-day/month/year +% clms vector Columns which contain the time +% information +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: March 2012 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +tval = length(period); + +if nargin < 3 + if tval == 2 + if isnumeric(fld) + clms = 2; + elseif iscell(fld) + clms = 4; + end + elseif tval == 4 + if isnumeric(fld) + clms = [1 2]; + elseif iscell(fld) + clms = [3 4]; + end + end +end + +if nargin < 3 + tscale = 'monthly'; +end + + + +if tval == 2 + + yrs = cell2mat(fld(:, clms(1))); + sindx = find(yrs == period(1), 1, 'first'); + eindx = find(yrs == period(2), 1, 'last'); + +elseif tval == 4 + + mnths = cell2mat(fld(:, clms(1))); + yrs = cell2mat(fld(:, clms(2))); + + sindx = find(mnths == period(1) & yrs == period(2), 1, 'first'); + eindx = find(mnths == period(3) & yrs == period(4), 1, 'last'); + +elseif tval == 6 + + days = cell2mat(fld(:, clms(1))); + mnths = cell2mat(fld(:, clms(2))); + yrs = cell2mat(fld(:, clms(3))); + + sindx = find(days == period(1) & ... + mnths == period(2) & ... + yrs == period(3), 1, 'first'); + sindx = find(days == period(4) & ... + mnths == period(5) & ... + yrs == period(6), 1, 'last'); + end + + if isempty(sindx), error('Start date not included in the dataset'); end + if isempty(eindx), error('End date not included in the dataset'); end + +otpt = fld(sindx:eindx, :); + + + + + + + + + + + + + + + + + + + + + diff --git a/findtstps_old.m b/findtstps_old.m new file mode 100644 index 0000000..339c2a0 --- /dev/null +++ b/findtstps_old.m @@ -0,0 +1,176 @@ +function [otpt] = findtstps(fld, period, tscale, clms) +% The function selects the time-steps between a start and end year from a +% given dataset which can be either a matrix or a cell array. + +%-------------------------------------------------------------------------- +% Input: fld matrix/cell Input field which contains the data and +% time information +% period vector Period which will be selected by the +% function. 
The start and end date must +% be given in one of the following orders +% [2 x 1] -> start-year, end-year +% [4 x 1] -> start-month/year +% end-month/year +% [6 x 1] -> start-day/month/year +% end-day/month/year +% clms vector Columns which contain the time +% information +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: March 2012 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +tval = length(period); + +if nargin < 4 + if tval == 2 + if isnumeric(fld) + clms = 2; + elseif iscell(fld) + clms = 4; + end + elseif tval == 4 + if isnumeric(fld) + clms = [1 2]; + elseif iscell(fld) + clms = [3 4]; + end + end +end + +if nargin < 3 + tscale = 'monthly'; +end + + +if isnumeric(fld) + + if tval == 2 + + dte_ref = dtevec([1 period(1)], [12 period(2)], tscale); + + yrs = fld(:, clms(1)); + sindx = find(yrs == period(1), 1, 'first'); + eindx = find(yrs == period(2), 1, 'last'); + + elseif tval == 4 + + mnths = fld(:, clms(1)); + yrs = fld(:, clms(2)); + + sindx = find(mnths == period(1) & yrs == period(2), 1, 'first'); + eindx = find(mnths == period(3) & yrs == period(4), 1, 'last'); + + elseif tval == 6 + + days = fld(:, clms(1)); + mnths = fld(:, clms(2)); + yrs = fld(:, clms(3)); + + sindx = find(days == period(1) & ... + mnths == period(2) & ... + yrs == period(3), 1, 'first'); + sindx = find(days == period(4) & ... + mnths == period(5) & ... + yrs == period(6), 1, 'last'); + end + + if isempty(sindx), error('Start date not included in the dataset'); end + missing_years = period(2) - yrs(end); + rplce = zeros(missing_years*12, size(fld, 2)); + if isempty(eindx), + warning('End date not included in the dataset'); + warning('Fill the missing months with NaNs'); + missing_years = period(2) - yrs(end); + rplce = zeros(missing_years*12, size(fld, 2)); + + rep_mnth = 1; + rep_yr = yrs(end)+1; + for i = 1:size(rplce, 1) + rplce(i,1) = rep_mnth; + rplce(i,2) = rep_yr; + rplce(i,3) = datenum(rep_yr, rep_mnth, 15); + + rep_mnth = rep_mnth + 1; + if rep_mnth == 13 + rep_mnth = 1; + rep_yr = rep_yr + 1; + end + end + rplce(:, 4:end) = NaN; + + fld = [fld; rplce]; + eindx = size(fld, 1); + + + + end + + + if fld(1,clms(1)) == 0 + otpt = [fld(1, :); fld(sindx:eindx, :)]; + else + otpt = fld(sindx:eindx, :); + end + + % NASTY WORKAROUND!!!!! Fill the missing time steps with NANs + + +elseif iscell(fld) + + if tval == 2 + + yrs = cell2mat(fld(:, clms(1))); + sindx = find(yrs == period(1), 1, 'first'); + eindx = find(yrs == period(2), 1, 'last'); + + elseif tval == 4 + + mnths = cell2mat(fld(:, clms(1))); + yrs = cell2mat(fld(:, clms(2))); + + sindx = find(mnths == period(1) & yrs == period(2), 1, 'first'); + eindx = find(mnths == period(3) & yrs == period(4), 1, 'last'); + + elseif tval == 6 + + days = cell2mat(fld(:, clms(1))); + mnths = cell2mat(fld(:, clms(2))); + yrs = cell2mat(fld(:, clms(3))); + + sindx = find(days == period(1) & ... + mnths == period(2) & ... + yrs == period(3), 1, 'first'); + sindx = find(days == period(4) & ... + mnths == period(5) & ... 
+ yrs == period(6), 1, 'last'); + end + + if isempty(sindx), error('Start date not included in the dataset'); end + if isempty(eindx), error('End date not included in the dataset'); end + + otpt = fld(sindx:eindx, :); +end + + + + + + + + + + + + + + + + + + + + diff --git a/findtstps_ts.m b/findtstps_ts.m new file mode 100644 index 0000000..cfe81c8 --- /dev/null +++ b/findtstps_ts.m @@ -0,0 +1,113 @@ +function [otpt] = findtstps_ts(fld, period, tscale, clms) +% The function selects the time-steps between a start and end year from a +% given dataset which can be either a matrix or a cell array. + +%-------------------------------------------------------------------------- +% Input: fld matrix/cell Input field which contains the data and +% time information +% period vector Period which will be selected by the +% function. The start and end date must +% be given in one of the following orders +% [2 x 1] -> start-year, end-year +% [4 x 1] -> start-month/year +% end-month/year +% [6 x 1] -> start-day/month/year +% end-day/month/year +% clms vector Columns which contain the time +% information +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: March 2012 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +tval = length(period); + +if nargin < 3 + tscale = 'monthly'; +end + +if nargin < 4 + if strcmp(tscale, 'monthly') + clms = [1 2 4]; + elseif strcmp(tscale, 'daily') + clms = [1 2 3 5]; + end +end + + + + +if strcmp(tscale, 'monthly') + + data_dtes = datenum(fld(:, clms(2)), fld(:, clms(1)), 15); + + if tval == 2 + dte_ref = dtevec([1 period(1)], [12 period(2)], 'monthly'); + elseif tval == 4 + dte_ref = dtevec(period(1:2), period(3:4), 'monthly'); + end + + num_indx = 3; + + +elseif strcmp(tscale, 'daily') + + data_dtes = datenum(fld(clms(3)), fld(clms(2)), fld(clms(1))); + + if tval == 2 + dte_ref = dtevec([1 period(1)], [12 period(2)], 'daily'); + elseif tval == 4 + dte_ref = dtevec(period(1:2), period(3:4), 'daily'); + elseif tval == 6 + dte_ref = dtevec(period(1:3), period(4:6), 'daily'); + end + + num_indx = 4; + + +end + +otpt = zeros(size(dte_ref, 1), size(fld, 2)).*NaN; + + +for i = 1:length(dte_ref) + data_indx = find(data_dtes == dte_ref(i, num_indx)); + otpt(i, 1:num_indx) = dte_ref(i, :); + + if ~isempty(data_indx) + otpt(i, num_indx+1:end) = fld(data_indx, clms(end):end); + end +end + +if fld(1,1) == 0 + otpt = [fld(1, :); otpt]; +end + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fitline.m b/fitline.m new file mode 100644 index 0000000..fcd7885 --- /dev/null +++ b/fitline.m @@ -0,0 +1,26 @@ +function [a b] = fitline(X, Y, weights) + +% The function fits a line in the two datasets X and Y (for a scatterplot) +if nargin < 3 + weights = ones(length(X), 1); +end + + +if find(isnan(X)) + weights = weights(~isnan(X)); + Y = Y(~isnan(X)); + X = X(~isnan(X)); +elseif find(isnan(Y)) + weights = weights(~isnan(Y)); + X = X(~isnan(Y)); + Y = Y(~isnan(Y)); +end + + +A = [X ones(length(X), 1)]; +P = diag(weights); + +xht = inv(A'*P*A)*A'*P*Y; + +a = xht(1); +b = xht(2); \ No newline at end of file diff --git a/fixepsbbox.m b/fixepsbbox.m new file mode 100644 index 0000000..296f353 --- /dev/null +++ b/fixepsbbox.m @@ -0,0 +1,47 @@ +function fixepsbbox(filename) +% function fixepsbbox(filename) +% +% matlab seems to compute a bounding box on eps files which is too +% large in the x-direction +% +% 
this script fixes the bounding box +% it is 99% stolen from fixeps.m, located here: +% http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=4818&objectType=file +% the only change is that this changes the bbox numbers +% seriously, the only change is the addition of lines 22,23,33,34 +% +% boundingbox has form of: +% %%BoundingBox: x1 y1 x2 y2 +% where (x1,y1) is lower-left and (x2,y2) is upper-right +% +% matlab computes x1 too small and x2 too large +% changes lines to: +% %%BoundingBox: x1+dx1 y1 x2+dx2 y2 + +% default amount to change bbox - found this fixed my plots just fine +dx1 = 10; % amount to move x1 +dx2 = -25; % amount to move x2 + +fid = fopen(filename,'r+'); +k=0; +while k <2 % 2 locations to replace. + tline = fgetl(fid); % get one line text + stridx=strfind(tline,'Box:'); + if isempty(stridx)==0 + len=length(tline); % the original line length + bb=sscanf(tline(stridx+4:end),'%i'); % read the numbers + bb(1) = bb(1) + dx1; % change x1 + bb(3) = bb(3) + dx2; % change x2 + bbstr=sprintf('%g %g %g %g',bb); % write bb numbers to string + tline=tline(1:stridx+3); % keep the "%%(page)boundingbox" string (with starting '%%') + spaces(1:len-length(tline)-length(bbstr)-1)=' '; % add trailing spaces as to overwrite old line completely + tline=[tline ' ' bbstr spaces]; % concate numbers and blank spaces to "%%(page)boundingbox" + + fseek(fid,-len-2,'cof'); % before using fprintf search to correct position + count = fprintf(fid,'%s',tline); + fseek(fid,2,'cof'); % seek to beginning of line (for windows text file) on + % for linux: change '2' to '1' I think + k=k+1; + end +end +fclose(fid); diff --git a/fld2gis.m b/fld2gis.m new file mode 100644 index 0000000..b94d260 --- /dev/null +++ b/fld2gis.m @@ -0,0 +1,43 @@ +function fld2gis(fld, fname, xll, yll, cellsz) + +% FLD2GIS +% +% This function writes the fields obtained fom GSHS to an ASCII file format +% for use in ArcView GIS package. +% +% INPUT - +% fld - GSHS fields +% fname - file name of the ArcView file +% xll - lower left corner x-coordinate +% yll - lower left corner y-coordinate +% cellsz - cell size +% OUTPUT - ASCII files in the ArcView GIS format +% +%-------------------------------------------------------------------------- + +% Author: Balaji Devaraju +% Created on: 29 March 2007, Stuttgart +%-------------------------------------------------------------------------- + +tt = '%14.6f '; +[m,n] = size(fld); +frmtwrt = [repmat(tt,1,n), '\n']; + +%fld = [fld(:,((n/2)+1):end), fld(:,1:(n/2))]; + + +fid = fopen(fname,'w+'); +fprintf(fid, 'ncols %g',n); +fprintf(fid, '\n'); +fprintf(fid, 'nrows %g',m); +fprintf(fid, '\n'); +fprintf(fid, 'xllcorner %g',xll); +fprintf(fid, '\n'); +fprintf(fid, 'yllcorner %g',yll); +fprintf(fid, '\n'); +fprintf(fid, 'cellsize %2.4f', cellsz); +fprintf(fid, '\n'); +fprintf(fid, 'NODATA_value -9999'); +fprintf(fid, '\n'); +fprintf(fid, frmtwrt, fld'); +fclose(fid); \ No newline at end of file diff --git a/flx2div.m b/flx2div.m new file mode 100644 index 0000000..d2f0e4e --- /dev/null +++ b/flx2div.m @@ -0,0 +1,56 @@ +function divfld = flx2div(flx_U, flx_V, dx, dy, theta); +% The function computes divergences from the u- and v-components of a flux +% field. 
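+%
+% A typical call (illustrative; a global 0.5-degree grid with 720 columns
+% is assumed, as hard-coded below):
+%
+%   theta = (89.75:-0.5:-89.75)';            % latitudes of the grid rows
+%   div   = flx2div(uflx, vflx, 0.5, 0.5, theta);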
+%-------------------------------------------------------------------------- +% Input: flx_U, flx_V u- and v-components of a flux field +% dx, dy angular distance between two grid-cells [°] +% theta vector of latitudes for the correction of +% meridional convergence [°] +% Output: div divergence field +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: August 2011 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +rho = pi/180; % Conversion factor from [°] to [rad] +R = 6371000; % Earth radius [m] + +if size(theta,2) > 1 + theta = theta'; +end + +theta = theta*ones(1,720); + +dy = (dy*rho)*R; % Metric distance between two +dx = (dx*rho)*R*cos(theta*rho); % gridcells [m] +% keyboard +qv_n = zeros(size(flx_U)); +qv_s = zeros(size(flx_U)); +qu_w = zeros(size(flx_U)); +qu_e = zeros(size(flx_U)); +div_x = zeros(size(flx_U)); +div_y = zeros(size(flx_U)); +divfld = zeros(size(flx_U)); + +% Computation of the flow from the northern and southern adjacent gridcells +qv_n(2:end-1,:) = (flx_V(2:end-1,:) + flx_V(1:end-2,:))/2; +qv_s(2:end-1,:) = (flx_V(2:end-1,:) + flx_V(3:end,:))/2; + +% Computation of the flow from the eastern and western adjacent gridcells +qu_w(2:end-1,2:end) = (flx_U(2:end-1,2:end) + flx_U(2:end-1,1:end-1))/2; +qu_w(2:end-1,1) = (flx_U(2:end-1,1) + flx_U(2:end-1,end))/2; + +qu_e(2:end-1,1:end-1) = (flx_U(2:end-1,1:end-1) + flx_U(2:end-1,2:end))/2; +qu_e(2:end-1,end) = (flx_U(2:end-1,end) + flx_U(2:end-1,1))/2; + +% Computation of the correction factor for meridional convergence +corr = 1/2*(qv_n + qv_s).*tan(theta*rho)/R; + +% Partial derivatives of the flux field with respect to the east-west +% and north-south components respectively +div_x = (qu_e - qu_w)./dx; +div_y = (qv_n - qv_s)/dy; + +divfld = div_x + div_y - corr; \ No newline at end of file diff --git a/flx2vimfd.m b/flx2vimfd.m new file mode 100644 index 0000000..f242549 --- /dev/null +++ b/flx2vimfd.m @@ -0,0 +1,109 @@ +function vimfd = flx2vimfd(inpt, uflx_c, vflx_c, con) + +% ------------------------------------------------------------------------- +% The funciton computes maps of vertically integraded moisture flux +% divergences from fields of eastward and northward specific humidity +% ------------------------------------------------------------------------- +% Input: inpt [n x m] cell-array which contains month, year, +% eastward and northward component of specific +% humidity +% uflx_c [1 x 1] column in which the fields of the eastward +% component of specific humidity is stored +% vflx_c [1 x 1] column in which the fields of the northward +% component of specific humidity is stored +% +% Output: vimfd [n x 3] cell-structure which contains the month and +% year of a specific dataset in the first two +% columns and the fields of vertically +% integraded moisture flux divergences in the +% third column +% +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% September 2010 +% ------------------------------------------------------------------------- +% Uses: +% ------------------------------------------------------------------------- + +R_e = 6370; + +uflx = inpt(:,uflx_c); +vflx = inpt(:,vflx_c); + +if nargin < 4 + con = 0; +end + +if nargin < 2 + uflx_c = 3; + vflx_c = 4; +end + + + +dim = size(uflx{1,1}); + +n_lats = dim(1); +n_longs = dim(2); + +d_lat = 
180/n_lats; +d_long = 360/n_longs; + +lat(:,1) = 90-d_lat/2:-d_lat:-90+d_lat/2; +lon(1,:) = -180+d_long/2:d_long:180-d_long/2; + + +distx(:,1) = cos(lat*pi/180)*((R_e*pi*2/360)*0.5) * 1000; + +disty = (((R_e*pi*2/360)*0.5) * 1000); + +f_n = zeros(size(uflx{1,1})); +f_s = zeros(size(uflx{1,1})); +f_w = zeros(size(uflx{1,1})); +f_e = zeros(size(uflx{1,1})); +corr = zeros(size(uflx{1,1})); + +% -------fn------- +% | | +% | | +% fw vimfd fe +% | | +% | | +% -------fs------- + +dist_x_mask = distx*ones(1,720); +lat_mask = lat*ones(1,720); + +for i = 1:length(uflx) + + vimfd{i,1} = inpt{i,1}; + vimfd{i,2} = inpt{i,2}; + vimfd{i,3} = zeros(size(uflx{1,1})); + + f_n(2:end-1,:) = (vflx{i}(2:end-1,:) + vflx{i}(1:end-2,:))/2; + f_s(2:end-1,:) = (vflx{i}(2:end-1,:) + vflx{i}(3:end,:))/2; + + f_w(2:end-1, 2:end-1) = (uflx{i}(2:end-1,2:end-1) + uflx{i}(2:end-1, 1:end-2))/2; + f_e(2:end-1, 2:end-1) = (uflx{i}(2:end-1,2:end-1) + uflx{i}(2:end-1, 3:end))/2; + + f_w(2:end-1, 1) = (uflx{i}(2:end-1,1) + uflx{i}(2:end-1,end))/2; + f_w(2:end-1, end) = (uflx{i}(2:end-1,end-1) + uflx{i}(2:end-1,end))/2; + + f_e(2:end-1, 1) = (uflx{i}(2:end-1,1) + uflx{i}(2:end-1,2))/2; + f_e(2:end-1, end) = (uflx{i}(2:end-1,end) + uflx{i}(2:end-1,1))/2; + + % Correction factors + corr(2:end-1,:) = ((f_n(2:end-1,:)+f_s(2:end-1,:))*0.5.*tan(lat_mask(2:end-1,:)*pi/180))/(R_e*1000); + + div_x = (f_e-f_w)./dist_x_mask; + div_y = (f_n-f_s)/disty; + + vimfd{i,3}(:,:) = div_x + div_y - corr; + vimfd{i,3}(1,:) = mean(vimfd{i,3}(1,:)); + vimfd{i,3}(end,:) = mean(vimfd{i,3}(end,:)); + + if con == 1 + vimfd{i,3} = vimfd{i,3}*(-1); + end + +end diff --git a/gen_mask.m b/gen_mask.m new file mode 100644 index 0000000..96732eb --- /dev/null +++ b/gen_mask.m @@ -0,0 +1,82 @@ +function mask = gen_mask(cswitch) +% The function generates a mask of 360x720 gridpoints containing elements +% of 0 and 1. +% ------------------------------------------------------------------------- +% INPUT: cswitch scalar Defines the desired regions. 
See the +% table below for possible selectins: +% 1: Only Landmasses +% 2: Only oceans +% 3: Landmasses without polar caps +% 4: Landmasses, NH +% 5: Landmasses, SH +% 6: Landmasses, 15°N - 15°S +% 7: Global, NH +% 8: Global, SH +% 9: Global, 15°N - 15°S +% 10: North America +% 11: South America +% 12: Europe +% 13: Africa +% 14: Asia +% 15: Australia +% ------------------------------------------------------------------------- +% OUTPUT mask [360x720] matrix with elements of 0 and 1 where all +% undesired gridpoints are set to 0 +% ------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: May 2011 +% ------------------------------------------------------------------------- + + +load continents.asc +mask = ones(360, 720); + +% Generate a mask of 360x720 gridpoints according to the parameter cswitch +if cswitch == 1 % Only Landmasses + mask(continents == -9999) = 0; % Remove oceans + mask(continents == 4) = 0; % Remove ice-shelf +elseif cswitch == 2 % Only oceans + mask(continents ~= -9999) = 0; % Remove landmasses + mask(continents == 4) = 1; % Add ice-shelf +elseif cswitch == 3 % Landmasses without polar caps + mask(continents == -9999) = 0; % Remove oceans + mask(continents == 0) = 0; % Remove polar caps + mask(continents == 4) = 0; % Remove ice-shelf +elseif cswitch == 4 % Landmasses, NH + mask(continents == -9999) = 0; % Remove oceans + mask(continents == 4) = 0; % Remove ice-shelf + mask(181:end,:) = 0; % Remove the SH-gridpoints +elseif cswitch == 5 % Landmasses, SH + mask(continents == -9999) = 0; % Remove oceans + mask(continents == 4) = 0; % Remove ice-shelf + mask(1:180,:) = 0; % Remove the NH-gridpoints +elseif cswitch == 55 % Landmasses, SH + mask(continents == -9999) = 0; % Remove oceans + mask(continents == 4) = 0; % Remove ice-shelf + mask(1:180,:) = 0; % Remove the NH-gridpoints + mask(continents == 0) = 0; % Remove Sout-Pole +elseif cswitch == 6 % Landmasses, 15°N - 15°S + mask(continents == -9999) = 0; % Remove oceans + mask(continents == 4) = 0; % Remove ice-shelf + mask(1:149, :) = 0; % Remove all gridpoints > 15°N + mask(212:end, :) = 0; % Remove all gridpoints > 15°S +elseif cswitch == 7 % Global, NH + mask(1:180,:) = 0; % Remove the SH-gridpoints +elseif cswitch == 8 % Global, SH + mask(1:180,:) = 0; % Remove the NH-gridpoints +elseif cswitch == 9 % Global, 15°N - 15°S + mask(1:149, :) = 0; % Remove all gridpoints > 15°N + mask(212:end, :) = 0; % Remove all gridpoints > 15°S +elseif cswitch == 10 + mask(continents ~= 7) = 0; +elseif cswitch == 11 + mask(continents ~= 6) = 0; +elseif cswitch == 12 + mask(continents ~= 3) = 0; +elseif cswitch == 13 + mask(continents ~= 8) = 0; +elseif cswitch == 14 + mask(continents ~= 9) = 0; +elseif cswitch == 15 + mask(continents ~= 1) = 0; +end \ No newline at end of file diff --git a/geodweekcomp.m b/geodweekcomp.m new file mode 100644 index 0000000..e61d762 --- /dev/null +++ b/geodweekcomp.m @@ -0,0 +1,163 @@ +% clear all +% +% load /media/storage/Data/Mflux/ECMWF/ECMWF_VIMFD.mat +% ec_indx1 = find(cell2mat(ecmwf_vimfd(:,4)) == 1 & cell2mat(ecmwf_vimfd(:,5)) == 2004); +% ec_indx2 = find(cell2mat(ecmwf_vimfd(:,4)) == 12 & cell2mat(ecmwf_vimfd(:,5)) == 2009); +% ecmwf_vimfd = ecmwf_vimfd(ec_indx1:ec_indx2,:); +% +% cd /home/lorenz-c/Dokumente/GRACE/SHBundle +% +% W = gaussfltr(200, 500); +% W = W*ones(1,401); +% +% for i = 1:72 +% tmp1 = [ecmwf_vimfd{i,9}(:, 361:end) ecmwf_vimfd{i,9}(:, 1:360)]; +% tmp2 = cs2sc(gsha(tmp1, 'ls', 'cell', 200)); +% tmp3 
= tmp2.*W; +% +% ecflt{i,1} = ecmwf_vimfd{i,4}; +% ecflt{i,2} = ecmwf_vimfd{i,5}; +% ecflt{i,3} = gshs(tmp3, 'none', 'cell', 360, 0, 0, 0, 'grace', 1); +% end +% keyboard +% save ecflt.mat ecflt +% clear all +% +% load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat +% mr_indx1 = find(cell2mat(merra_vimfd(:,4)) == 1 & cell2mat(merra_vimfd(:,5)) == 2004); +% mr_indx2 = find(cell2mat(merra_vimfd(:,4)) == 12 & cell2mat(merra_vimfd(:,5)) == 2009); +% merra_vimfd = merra_vimfd(mr_indx1:mr_indx2,:); +% +% cd /home/lorenz-c/Dokumente/GRACE/SHBundle +% +% W = gaussfltr(200, 500); +% W = W*ones(1,401); +% +% for i = 1:72 +% +% tmp1 = [merra_vimfd{i,9}(:, 361:end) merra_vimfd{i,9}(:, 1:360)]; +% tmp2 = cs2sc(gsha(tmp1, 'ls', 'cell', 200)); +% tmp3 = tmp2.*W; +% +% mcflt{i,1} = merra_vimfd{i,4}; +% mcflt{i,2} = merra_vimfd{i,5}; +% mcflt{i,3} = gshs(tmp3, 'none', 'cell', 360, 0, 0, 0, 'grace', 1); +% end +% +% save mrflt.mat mrflt +% clear all +% +% +% +% +% load /media/storage/Data/Mflux/CFSR/CFSR_VIMFD.mat +% cf_indx1 = find(cell2mat(cfsr_vimfd(:,4)) == 1 & cell2mat(cfsr_vimfd(:,5)) == 2004); +% cf_indx2 = find(cell2mat(cfsr_vimfd(:,4)) == 12 & cell2mat(cfsr_vimfd(:,5)) == 2009); +% cfsr_vimfd = cfsr_vimfd(cf_indx1:cf_indx2,:); +% +% +% cd /home/lorenz-c/Dokumente/GRACE/SHBundle +% +% W = gaussfltr(200, 500); +% W = W*ones(1,401); +% +% +% +% +% for i = 1:72 +% +% +% tmp1 = [cfsr_vimfd{i,9}(:, 361:end) cfsr_vimfd{i,9}(:, 1:360)]; +% tmp2 = cs2sc(gsha(tmp1, 'ls', 'cell', 200)); +% tmp3 = tmp2.*W; +% +% cfflt{i,1} = cfsr_vimfd{i,4}; +% cfflt{i,2} = cfsr_vimfd{i,5}; +% cfflt{i,3} = gshs(tmp3, 'none', 'cell', 360, 0, 0, 0, 'grace', 1); +% +% end +% +% save cfflt.mat cfflt + + +% % mdiv_ecmwf = spataggmn(ecmwf_vimfd, indexfile3, [193, 183, 320], 'clms', [4 5 9]); +% % +% % clear ecmwf_vimfd +% +% % load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat +% % mdiv_merra = spataggmn(merra_vimfd, indexfile3, [193, 183, 320], 'clms', [4 5 9]); +% % +% % clear merra_vimfd +% +% % load /media/storage/Data/Runoff/GRDC/Runoff_review_2011.mat +% % +% % R = [Runoff_review(:, 1:2) Runoff_review(:, [3 9 17])]; +% % +% % load /home/lorenz-c/Downloads/gfz4mdsg500_der_c.mat +% % +% % +% % for i = 1:98 +% % gfz4mdsg500_der_c{i,5} = [gfz4mdsg500_der_c{i,5}(:, 361:end) gfz4mdsg500_der_c{i,5}(:, 1:360)]; +% % end +% % +% % grace = spataggmn(gfz4mdsg500_der_c, indexfile3, [193, 183, 320], 'clms', [2 1 5]); +% +% ec_indx1 = find(mdiv_ecmwf(:,1) == 1 & mdiv_ecmwf(:,2) == 2004); +% ec_indx2 = find(mdiv_ecmwf(:,1) == 12 & mdiv_ecmwf(:,2) == 2009); +% +% % mr_indx1 = find(mdiv_merra(:,1) == 1 & mdiv_merra(:,2) == 2003); +% % mr_indx2 = find(mdiv_merra(:,1) == 12 & mdiv_merra(:,2) == 2008); +% +% % gr_indx1 = find(grace(:,1) == 1 & grace(:,2) == 2004); +% % gr_indx2 = find(grace(:,1) == 12 & grace(:,2) == 2009); +% +% % grace = grace(gr_indx1:gr_indx2,:); +% +% rf_indx1 = find(R(:,2) == 1 & R(:,1) == 2004); +% rf_indx2 = find(R(:,2) == 12 & R(:,1) == 2009); +% +% +% dsdt_ecmwf = mdiv_ecmwf(ec_indx1:ec_indx2,:); +% dsdt_ecmwf(:, 4:end) = -dsdt_ecmwf(:, 4:end) - R(rf_indx1:rf_indx2, 3:end); +% % +% dsdt_merra = mdiv_merra(mr_indx1:mr_indx2,:); +% dsdt_merra = -dsdt_merra(:, 4:end) - R(:, 4:end); +% clear all +% clc + + +load /media/storage/Analysis/ecflt.mat +load indexfile3.asc + +ecmwf = spataggmn(ecflt, indexfile3, [193, 183, 320, 29], 'clms', [1 2 3]); +clear ecflt + +load /media/storage/Analysis/mcflt.mat + +merra = spataggmn(mcflt, indexfile3, [193, 183, 320, 29], 'clms', [1 2 3]); +clear mcflt + +load 
/media/storage/Analysis/cfflt.mat +load indexfile3.asc + +cfsr = spataggmn(cfflt, indexfile3, [193, 183, 320, 29], 'clms', [1 2 3]); +clear cfflt + +load /home/lorenz-c/Dokumente/gfz4mdsg500_der_c.mat +for i = 1:98 + gfz4mdsg500_der_c{i,5} = [gfz4mdsg500_der_c{i,5}(:,361:end) gfz4mdsg500_der_c{i,5}(:,1:360)]; +end + +grace = spataggmn(gfz4mdsg500_der_c, indexfile3, [193, 183, 320, 29], 'clms', [2 1 5]); +clear gfz4mdsg500_der_c i + + +load /media/storage/Data/Runoff/GRDC/GRDC_R.mat + + + + + + + + diff --git a/glob_corr.m b/glob_corr.m new file mode 100644 index 0000000..bfb08bc --- /dev/null +++ b/glob_corr.m @@ -0,0 +1,55 @@ +function [corrs corr] = glob_corr(set1, set2) +% The function computes a global map (corrs) where the different catchments +% show the correlation between set1 and set2. It can thus be used for a +% rough global comparison of two datasets. +% Therefore, both sets must be given as [405 x 3] structure variables in +% the following form: +% set = {'ctchmnt_name', 'ctchmnt_id', time_series} +% The time series must be given as [n x 3] vector with the following +% elements: +% time_series = [month year signal_value] +% The function automatically determines the dataset with the least values +% and computes the correlation only in the overlapping time period. The +% ctchmnt_ids must agree with the ids in the global indexfile.asc + + +load indexfile3.asc +corrs = zeros(360,720); + + +r1 = size(set1{1,3},1); +r2 = size(set2{1,3},1); +r3 = size(set1, 1); + +mx = min([r1 r2]); + +for i = 1:r3 + tmp1 = set1{i,3}(1:mx,3); + tmp2 = set2{i,3}(1:mx,3); + + mn_s1(i) = mean(tmp1); + mn_s2(i) = mean(tmp2); + + dv_s1 = tmp1 - mn_s1(i); + dv_s2 = tmp2 - mn_s2(i); + + sdv_s1 = sqrt(1/mx*(dv_s1'*dv_s1)); + sdv_s2 = sqrt(1/mx*(dv_s2'*dv_s2)); + + corr(i) = (dv_s1'*dv_s2)/((mx-1)*(sdv_s1*sdv_s2)); + + if corr(i) > 1 + corr(i) = 1; + elseif corr(i) < -1 + corr(i) = -1; + end + + corrs(indexfile3 == set1{i,2}) = corr(i); +end + + + + + + + diff --git a/glob_prec_corr.m b/glob_prec_corr.m new file mode 100644 index 0000000..482edea --- /dev/null +++ b/glob_prec_corr.m @@ -0,0 +1,402 @@ +% load /media/storage/Data/Precipitation/GPCC/GPCC_PRECv4.0.mat +% load /media/storage/Data/Precipitation/GPCP/GPCP_PRECv2.1.mat +% +% +% gpcp_glob_ann = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006], 1, 'annual', [4 5 9], -9999); +% gpcp_glob_mon = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],1, 'monthly', [4 5 9], -9999); +% +% gpcp_nh_ann = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],4, 'annual', [4 5 9], -9999); +% gpcp_nh_mon = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],4, 'monthly', [4 5 9], -9999); +% +% gpcp_sh_ann = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],5, 'annual', [4 5 9], -9999); +% gpcp_sh_mon = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],5, 'monthly', [4 5 9], -9999); +% +% gpcp_trpc_ann = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],6, 'annual', [4 5 9], -9999); +% gpcp_trpc_mon = comp_glob_corr(gpcc_prec, gpcp_prec, [1989 2006],6, 'monthly', [4 5 9], -9999); +% +% gpcp_cont_ann = comp_cont_corr(gpcc_prec, gpcp_prec, [1989 2006], 'annual', [7 6 3 8 9 1], [4 5 9], -9999); +% gpcp_cont_mon = comp_cont_corr(gpcc_prec, gpcp_prec, [1989 2006], 'monthly', [7 6 3 8 9 1], [4 5 9], -9999); +% +% +% gpcp_ann(:,1) = gpcp_glob_ann(:,2); +% gpcp_ann(:,2) = gpcp_nh_ann(:,2); +% gpcp_ann(:,3) = gpcp_sh_ann(:,2); +% gpcp_ann(:,4) = gpcp_cont_ann(:,2); +% gpcp_ann(:,5) = gpcp_cont_ann(:,3); +% gpcp_ann(:,6) = gpcp_cont_ann(:,4); +% gpcp_ann(:,7) = gpcp_cont_ann(:,5); +% 
gpcp_ann(:,8) = gpcp_cont_ann(:,6); +% gpcp_ann(:,9) = gpcp_cont_ann(:,7); +% gpcp_ann(:,10) = gpcp_trpc_ann(:,2); +% +% gpcp_mon(:,1) = gpcp_glob_mon(:,2); +% gpcp_mon(:,2) = gpcp_nh_mon(:,2); +% gpcp_mon(:,3) = gpcp_sh_mon(:,2); +% gpcp_mon(:,4) = gpcp_cont_mon(:,2); +% gpcp_mon(:,5) = gpcp_cont_mon(:,3); +% gpcp_mon(:,6) = gpcp_cont_mon(:,4); +% gpcp_mon(:,7) = gpcp_cont_mon(:,5); +% gpcp_mon(:,8) = gpcp_cont_mon(:,6); +% gpcp_mon(:,9) = gpcp_cont_mon(:,7); +% gpcp_mon(:,10) = gpcp_trpc_mon(:,2); +% clear *nhsh* *glob* *cont* gpcp_prec +% keyboard +% load /media/storage/Data/Precipitation/CRU3/CRU3_PRECv3.0.mat +% cru_glob_ann = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],1, 'annual', [4 5 9], -9999); +% cru_glob_mon = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],1, 'monthly', [4 5 9], -9999); +% +% cru_nh_ann = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],4, 'annual', [4 5 9], -9999); +% cru_nh_mon = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],4, 'monthly', [4 5 9], -9999); +% +% cru_sh_ann = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],5, 'annual', [4 5 9], -9999); +% cru_sh_mon = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],5, 'monthly', [4 5 9], -9999); +% +% cru_trpc_ann = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],6, 'annual', [4 5 9], -9999); +% cru_trpc_mon = comp_glob_corr(gpcc_prec,cru_prec, [1989 2006],6, 'monthly', [4 5 9], -9999); +% +% cru_cont_ann = comp_cont_corr(gpcc_prec,cru_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999); +% cru_cont_mon = comp_cont_corr(gpcc_prec,cru_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999); +% +% +% cru_ann(:,1) = cru_glob_ann(:,2); +% cru_ann(:,2) = cru_nh_ann(:,2); +% cru_ann(:,3) = cru_sh_ann(:,2); +% cru_ann(:,4) = cru_cont_ann(:,2); +% cru_ann(:,5) = cru_cont_ann(:,3); +% cru_ann(:,6) = cru_cont_ann(:,4); +% cru_ann(:,7) = cru_cont_ann(:,5); +% cru_ann(:,8) = cru_cont_ann(:,6); +% cru_ann(:,9) = cru_cont_ann(:,7); +% cru_ann(:,10) = cru_trpc_ann(:,2); +% +% cru_mon(:,1) = cru_glob_mon(:,2); +% cru_mon(:,2) = cru_nh_mon(:,2); +% cru_mon(:,3) = cru_sh_mon(:,2); +% cru_mon(:,4) = cru_cont_mon(:,2); +% cru_mon(:,5) = cru_cont_mon(:,3); +% cru_mon(:,6) = cru_cont_mon(:,4); +% cru_mon(:,7) = cru_cont_mon(:,5); +% cru_mon(:,8) = cru_cont_mon(:,6); +% cru_mon(:,9) = cru_cont_mon(:,7); +% cru_mon(:,10) = cru_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* cru_prec* + + +% load /media/storage/Data/Precipitation/CPC/CPC_PREC.mat +% cpc_glob_ann = comp_glob_corr(gpcc_prec, cpc_prec, [1989 2006],1, 'annual', [4 5 9], -9999); +% cpc_glob_mon = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],1, 'monthly', [4 5 9], -9999); +% +% cpc_nh_ann = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],4, 'annual', [4 5 9], -9999); +% cpc_nh_mon = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],4, 'monthly', [4 5 9], -9999); +% +% cpc_sh_ann = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],5, 'annual', [4 5 9], -9999); +% cpc_sh_mon = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],5, 'monthly', [4 5 9], -9999); +% +% cpc_trpc_ann = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],6, 'annual', [4 5 9], -9999); +% cpc_trpc_mon = comp_glob_corr(gpcc_prec,cpc_prec, [1989 2006],6, 'monthly', [4 5 9], -9999); +% +% cpc_cont_ann = comp_cont_corr(gpcc_prec,cpc_prec, [1989 2006], 'annual',[7 6 3 8 9 1], [4 5 9], -9999); +% cpc_cont_mon = comp_cont_corr(gpcc_prec,cpc_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999); +% clear cpc_prec +% +% +% cpc_ann(:,1) = cpc_glob_ann(:,2); +% cpc_ann(:,2) = cpc_nh_ann(:,2); +% cpc_ann(:,3) = 
cpc_sh_ann(:,2); +% cpc_ann(:,4) = cpc_cont_ann(:,2); +% cpc_ann(:,5) = cpc_cont_ann(:,3); +% cpc_ann(:,6) = cpc_cont_ann(:,4); +% cpc_ann(:,7) = cpc_cont_ann(:,5); +% cpc_ann(:,8) = cpc_cont_ann(:,6); +% cpc_ann(:,9) = cpc_cont_ann(:,7); +% cpc_ann(:,10) = cpc_trpc_ann(:,2); +% +% cpc_mon(:,1) = cpc_glob_mon(:,2); +% cpc_mon(:,2) = cpc_nh_mon(:,2); +% cpc_mon(:,3) = cpc_sh_mon(:,2); +% cpc_mon(:,4) = cpc_cont_mon(:,2); +% cpc_mon(:,5) = cpc_cont_mon(:,3); +% cpc_mon(:,6) = cpc_cont_mon(:,4); +% cpc_mon(:,7) = cpc_cont_mon(:,5); +% cpc_mon(:,8) = cpc_cont_mon(:,6); +% cpc_mon(:,9) = cpc_cont_mon(:,7); +% cpc_mon(:,10) = cpc_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/ECMWF/ECMWF_PREC.mat +% ecmwf_glob_ann = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],1, 'annual', [4 5 9], -9999); +% ecmwf_glob_mon = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],1, 'monthly', [4 5 9], -9999); +% +% ecmwf_nh_ann = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],4, 'annual', [4 5 9], -9999); +% ecmwf_nh_mon = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],4, 'monthly', [4 5 9], -9999); +% +% ecmwf_sh_ann = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],5, 'annual', [4 5 9], -9999); +% ecmwf_sh_mon = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],5, 'monthly', [4 5 9], -9999); +% +% ecmwf_trpc_ann = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],6, 'annual', [4 5 9], -9999); +% ecmwf_trpc_mon = comp_glob_corr(gpcc_prec,ecmwf_prec, [1989 2006],6, 'monthly', [4 5 9], -9999); +% +% ecmwf_cont_ann = comp_cont_corr(gpcc_prec,ecmwf_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999); +% ecmwf_cont_mon = comp_cont_corr(gpcc_prec,ecmwf_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999); +% clear ecmwf_prec +% +% +% ecmwf_ann(:,1) = ecmwf_glob_ann(:,2); +% ecmwf_ann(:,2) = ecmwf_nh_ann(:,2); +% ecmwf_ann(:,3) = ecmwf_sh_ann(:,2); +% ecmwf_ann(:,4) = ecmwf_cont_ann(:,2); +% ecmwf_ann(:,5) = ecmwf_cont_ann(:,3); +% ecmwf_ann(:,6) = ecmwf_cont_ann(:,4); +% ecmwf_ann(:,7) = ecmwf_cont_ann(:,5); +% ecmwf_ann(:,8) = ecmwf_cont_ann(:,6); +% ecmwf_ann(:,9) = ecmwf_cont_ann(:,7); +% ecmwf_ann(:,10) = ecmwf_trpc_ann(:,2); +% +% ecmwf_mon(:,1) = ecmwf_glob_mon(:,2); +% ecmwf_mon(:,2) = ecmwf_nh_mon(:,2); +% ecmwf_mon(:,3) = ecmwf_sh_mon(:,2); +% ecmwf_mon(:,4) = ecmwf_cont_mon(:,2); +% ecmwf_mon(:,5) = ecmwf_cont_mon(:,3); +% ecmwf_mon(:,6) = ecmwf_cont_mon(:,4); +% ecmwf_mon(:,7) = ecmwf_cont_mon(:,5); +% ecmwf_mon(:,8) = ecmwf_cont_mon(:,6); +% ecmwf_mon(:,9) = ecmwf_cont_mon(:,7); +% ecmwf_mon(:,10) = ecmwf_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/MERRA/MERRA_PREC.mat +% merra_glob_ann = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],1, 'annual', [4 5 9], -9999); +% merra_glob_mon = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],1, 'monthly', [4 5 9], -9999); +% +% merra_nh_ann = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],4, 'annual', [4 5 9], -9999); +% merra_nh_mon = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],4, 'monthly', [4 5 9], -9999); +% +% merra_sh_ann = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],5, 'annual', [4 5 9], -9999); +% merra_sh_mon = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],5, 'monthly', [4 5 9], -9999); +% +% merra_trpc_ann = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],6, 'annual', [4 5 9], -9999); +% merra_trpc_mon = comp_glob_corr(gpcc_prec,merra_prec, [1989 2006],6, 'monthly', [4 5 9], -9999); +% +% merra_cont_ann = 
comp_cont_corr(gpcc_prec,merra_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999); +% merra_cont_mon = comp_cont_corr(gpcc_prec,merra_prec, [1989 2006], 'monthly', [7 6 3 8 9 1],[4 5 9], -9999); +% clear merra_prec +% +% +% merra_ann(:,1) = merra_glob_ann(:,2); +% merra_ann(:,2) = merra_nh_ann(:,2); +% merra_ann(:,3) = merra_sh_ann(:,2); +% merra_ann(:,4) = merra_cont_ann(:,2); +% merra_ann(:,5) = merra_cont_ann(:,3); +% merra_ann(:,6) = merra_cont_ann(:,4); +% merra_ann(:,7) = merra_cont_ann(:,5); +% merra_ann(:,8) = merra_cont_ann(:,6); +% merra_ann(:,9) = merra_cont_ann(:,7); +% merra_ann(:,10) = merra_trpc_ann(:,2); +% +% merra_mon(:,1) = merra_glob_mon(:,2); +% merra_mon(:,2) = merra_nh_mon(:,2); +% merra_mon(:,3) = merra_sh_mon(:,2); +% merra_mon(:,4) = merra_cont_mon(:,2); +% merra_mon(:,5) = merra_cont_mon(:,3); +% merra_mon(:,6) = merra_cont_mon(:,4); +% merra_mon(:,7) = merra_cont_mon(:,5); +% merra_mon(:,8) = merra_cont_mon(:,6); +% merra_mon(:,9) = merra_cont_mon(:,7); +% merra_mon(:,10) = merra_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% load /media/storage/Data/Precipitation/CFSR/CFSR_PREC.mat +% cfsr_glob_ann = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],1, 'annual', [4 5 9], -9999); +% cfsr_glob_mon = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],1, 'monthly', [4 5 9], -9999); +% +% cfsr_nh_ann = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],4, 'annual', [4 5 9], -9999); +% cfsr_nh_mon = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],4, 'monthly', [4 5 9], -9999); +% +% cfsr_sh_ann = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],5, 'annual', [4 5 9], -9999); +% cfsr_sh_mon = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],5, 'monthly', [4 5 9], -9999); +% +% cfsr_trpc_ann = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],6, 'annual', [4 5 9], -9999); +% cfsr_trpc_mon = comp_glob_corr(gpcc_prec,cfsr_prec, [1989 2006],6, 'monthly', [4 5 9], -9999); +% +% cfsr_cont_ann = comp_cont_corr(gpcc_prec,cfsr_prec, [1989 2006], 'annual', [7 6 3 8 9 1],[4 5 9], -9999); +% cfsr_cont_mon = comp_cont_corr(gpcc_prec,cfsr_prec, [1989 2006], 'monthly',[7 6 3 8 9 1], [4 5 9], -9999); +% clear cfsr_prec +% +% +% cfsr_ann(:,1) = cfsr_glob_ann(:,2); +% cfsr_ann(:,2) = cfsr_nh_ann(:,2); +% cfsr_ann(:,3) = cfsr_sh_ann(:,2); +% cfsr_ann(:,4) = cfsr_cont_ann(:,2); +% cfsr_ann(:,5) = cfsr_cont_ann(:,3); +% cfsr_ann(:,6) = cfsr_cont_ann(:,4); +% cfsr_ann(:,7) = cfsr_cont_ann(:,5); +% cfsr_ann(:,8) = cfsr_cont_ann(:,6); +% cfsr_ann(:,9) = cfsr_cont_ann(:,7); +% cfsr_ann(:,10) = cfsr_trpc_ann(:,2); +% +% cfsr_mon(:,1) = cfsr_glob_mon(:,2); +% cfsr_mon(:,2) = cfsr_nh_mon(:,2); +% cfsr_mon(:,3) = cfsr_sh_mon(:,2); +% cfsr_mon(:,4) = cfsr_cont_mon(:,2); +% cfsr_mon(:,5) = cfsr_cont_mon(:,3); +% cfsr_mon(:,6) = cfsr_cont_mon(:,4); +% cfsr_mon(:,7) = cfsr_cont_mon(:,5); +% cfsr_mon(:,8) = cfsr_cont_mon(:,6); +% cfsr_mon(:,9) = cfsr_cont_mon(:,7); +% cfsr_mon(:,10) = cfsr_trpc_mon(:,2); +% +% clear *nhsh* *glob* *cont* +% +% +% save corr_timeseries.mat +% +% +% +% text_str{1} = 'Global land'; +% text_str{2} = 'Northern hemisphere'; +% text_str{3} = 'Southern hemisphere'; +% text_str{4} = 'North America'; +% text_str{5} = 'South America'; +% text_str{6} = 'Europe'; +% text_str{7} = 'Africa'; +% text_str{8} = 'Asia'; +% text_str{9} = 'Australia'; +% text_str{10} = '15S - 15N (Tropics)'; +% +% yrs{1} = ' '; +% yrs{2} = '1990'; +% yrs{3} = ' '; +% yrs{4} = '1992 '; +% yrs{5} = ' '; +% yrs{6} = '1994 '; +% yrs{7} = ' '; +% yrs{8} = '1996 '; +% yrs{9} = ' '; +% yrs{10} = '1998 '; +% 
yrs{11} = ' '; +% yrs{12} = '2000'; +% yrs{13} = ' '; +% yrs{14} = '2002 '; +% yrs{15} = ' '; +% yrs{16} = '2004 '; +% yrs{17} = ' '; +% yrs{18} = '2006 '; +% +% mnths{1} = 'J'; +% mnths{2} = 'F'; +% mnths{3} = 'M'; +% mnths{4} = 'A'; +% mnths{5} = 'M'; +% mnths{6} = 'J'; +% mnths{7} = 'J'; +% mnths{8} = 'A'; +% mnths{9} = 'S'; +% mnths{10} = 'O'; +% mnths{11} = 'N'; +% mnths{12} = 'D'; +% +% fnames_ann{1} = 'R_ann_glob_ts'; +% fnames_ann{2} = 'R_ann_NH_ts'; +% fnames_ann{3} = 'R_ann_SH_ts'; +% fnames_ann{4} = 'R_ann_NA_ts'; +% fnames_ann{5} = 'R_ann_SA_ts'; +% fnames_ann{6} = 'R_ann_E_ts'; +% fnames_ann{7} = 'R_ann_AF_ts'; +% fnames_ann{8} = 'R_ann_AS_ts'; +% fnames_ann{9} = 'R_ann_AU_ts'; +% fnames_ann{10} = 'R_ann_TR_ts'; +% +% fnames_mon{1} = 'R_mon_glob_ts'; +% fnames_mon{2} = 'R_mon_NH_ts'; +% fnames_mon{3} = 'R_mon_SH_ts'; +% fnames_mon{4} = 'R_mon_NA_ts'; +% fnames_mon{5} = 'R_mon_SA_ts'; +% fnames_mon{6} = 'R_mon_E_ts'; +% fnames_mon{7} = 'R_mon_AF_ts'; +% fnames_mon{8} = 'R_mon_AS_ts'; +% fnames_mon{9} = 'R_mon_AU_ts'; +% fnames_mon{10} = 'R_mon_TR_ts'; + +for i = 1:10 + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + plot(gpcp_ann(:,i), 'c', 'linewidth', 1.5); + hold on + plot(cru_ann(:,i), 'm', 'linewidth', 1.5); + plot(cpc_ann(:,i), 'y', 'linewidth', 1.5); + plot(ecmwf_ann(:,i), 'b', 'linewidth', 1.5); + plot(merra_ann(:,i), 'r', 'linewidth', 1.5); + plot(cfsr_ann(:,i), 'g', 'linewidth', 1.5); + + grid on + axis([1 18 0.4 1]); + set(gca, 'xtick', 1:1:18); + set(gca, 'ytick', 0.4:0.1:1); + set(gca, 'xticklabel', yrs, 'fontsize', 16); + pbaspect([17 6 1]); + + + text(2, 0.9, text_str{i} , 'fontsize', 20); +if i == 1 + leg = legend('GPCP', 'CRU', 'CPC', 'INTERIM', 'MERRA', 'CFSR', 'location','Best'); + set(leg, 'fontsize', 12) + keyboard + end + print(h, '-depsc2', fnames_ann{i}); + clear h + + + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + plot(gpcp_mon(:,i), 'c', 'linewidth', 1.5); + hold on + plot(cru_mon(:,i), 'm', 'linewidth', 1.5); + plot(cpc_mon(:,i), 'y', 'linewidth', 1.5); + plot(ecmwf_mon(:,i), 'b', 'linewidth', 1.5); + plot(merra_mon(:,i), 'r', 'linewidth', 1.5); + plot(cfsr_mon(:,i), 'g', 'linewidth', 1.5); + + grid on + axis([1 12 0.4 1]); + set(gca, 'xtick', 1:1:12); + set(gca, 'ytick', 0.4:0.1:1); + set(gca, 'xticklabel', mnths, 'fontsize', 16); + pbaspect([12 6 1]); + + + text(2, 0.9, text_str{i} , 'fontsize', 20); + if i == 1 + leg = legend('GPCP', 'CRU', 'CPC', 'INTERIM', 'MERRA', 'CFSR', 'location','Best'); + set(leg, 'fontsize', 12) + keyboard + end + print(h, '-depsc2', fnames_mon{i}); + clear h + close all +end + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/glob_stats.m b/glob_stats.m new file mode 100644 index 0000000..7642513 --- /dev/null +++ b/glob_stats.m @@ -0,0 +1,48 @@ +function [tot_ref, tot_obs] = glob_stats(ref_field, mval, varargin) + +theta = (0.25:0.5:179.75)'; + +A = area_wghts(theta, 0.5) +A = A*ones(1,720); + + +for i = 1:length(ref_field) + % This step ensures that both the ref_field and obs_field are converted + % to vectors of the same length + tmp_ref = ref_field{i}(ref_field{i}~=mval); + tmp_A = A(ref_field{i}~=mval); + A_tot = sum(tmp_A); + wt_ref = tmp_A.*tmp_ref/A_tot; + tot_ref(i,1) = sum(wt_ref); + for k = 1:length(varargin) + tmp_obs(:,k) = varargin{k}{i}(ref_field{i}~=mval); + wt_obs = tmp_A.*tmp_obs(:,k)/A_tot; + tot_obs(i,k) = sum(wt_obs); + end + +% keyboard + + +% keyboard + + + + + +end + +% [R E sig] = taylor_stats(tot_ref, tot_obs); + + + + + + + + + + + + + + diff --git 
a/grace_cov_mean.m b/grace_cov_mean.m
new file mode 100644
index 0000000..9f9df34
--- /dev/null
+++ b/grace_cov_mean.m
@@ -0,0 +1,71 @@
+function Q_mean = grace_cov_mean(inames, type, oname)
+
+% The function computes a full mean covariance matrix of GRACE. One can
+% choose between deviations from the mean or derivatives.
+%--------------------------------------------------------------------------
+% Input:   inames   'cell'     List of filenames which should be
+%                              incorporated in the mean field
+%
+%          type     'string'   Defines the type of preprocessing:
+%                              'mean' -> mean is removed
+%                              'derr' -> derivatives are computed (TBI)
+%          oname    'string'   Filename where the mean covariance
+%                              matrix should be saved
+%
+% Output:  Q_mean   [n x n]    Mean covariance matrix
+%--------------------------------------------------------------------------
+% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen
+% Date:   October 2011
+%--------------------------------------------------------------------------
+% Uses:
+%--------------------------------------------------------------------------
+
+if nargin < 3
+    oname = 'Q_mean.mat';
+end
+
+if nargin < 2
+    type = 'mean';
+end
+
+
+n = length(inames);
+
+load(inames{1});
+
+if isstruct(Q)
+    Q_mean = [Q.NW Q.SW'; Q.SW, Q.SE];
+else
+    Q_mean = Q;
+end
+
+
+hwb = waitbar(0,'Percentage of covariance matrices processed ...');
+set(hwb,'NumberTitle','off','Name','Mean covariance matrix ')
+
+waitbar((1)/(n))
+for i = 2:n
+    load(inames{i});
+
+    if isstruct(Q)
+        Q_mean = Q_mean + [Q.NW Q.SW'; Q.SW Q.SE];
+    else
+        Q_mean = Q_mean + Q;
+    end
+
+    clear Q
+    waitbar((i)/(n))
+end
+
+tmp = Q_mean/(n^2);
+clear Q_mean
+
+Q_mean.NW = tmp(1:1891, 1:1891);
+Q_mean.SW = tmp(1892:end, 1:1891);
+Q_mean.SE = tmp(1892:end, 1892:end);
+
+save(oname, 'Q_mean');
+close(hwb)
+
+
+
diff --git a/grace_pre.m b/grace_pre.m
new file mode 100644
index 0000000..238eda3
--- /dev/null
+++ b/grace_pre.m
@@ -0,0 +1,64 @@
+function otpt = prep_grace(inpt, type, clms, seyrs)
+
+% GRACE-Preprocessing function which can remove a mean field, but also
+% e.g.
a trend + +if nargin < 4 + seyrs = [2003 2010]; +end + +if nargin < 3 + clms = [5 4 9 10]; +end + +if nargin < 2 + type = 'mean'; +end + + +mnth = cell2mat(inpt(:, clms(1))); +yr = cell2mat(inpt(:, clms(2))); + +s_ind = find(mnth == 1 & yr == seyrs(1)); +e_ind = find(mnth == 12 & yr == seyrs(2)); + +mnth = mnth(s_ind:e_ind); +yr = yr(s_ind:e_ind); + +n = length(mnth); + +signal = inpt(s_ind:e_ind, clms(3)); +error = inpt(s_ind:e_ind, clms(4)); + +maxdeg = size(signal{1}, 1); + +if strcmp(type, 'mean') + mn_sig = zeros(size(signal{1})); + mn_err = zeros(size(error{1})); + + for i = 1:n + mn_sig = mn_sig + signal{i}; + mn_err = mn_err + error{i}.^2; + end + + mn_sig = mn_sig/n; + mn_err = mn_err/n^2; + + for i = 1:n + otpt{i,1} = mnth(i); + otpt{i,2} = yr(i); + otpt{i,3} = signal{i} - mn_sig; + otpt{i,4} = sqrt(error{i} + mn_err); + otpt{i,5} = degvar(otpt{i,3}, maxdeg, 0, 'none', 0, 0); + otpt{i,6} = degvar(otpt{i,4}, maxdeg, 0, 'none', 0, 0); + end + + + end + + + + + + + \ No newline at end of file diff --git a/grib2netcdf.m b/grib2netcdf.m new file mode 100644 index 0000000..fe5952d --- /dev/null +++ b/grib2netcdf.m @@ -0,0 +1,105 @@ +function [] = grib2netcdf(fname, varname, outnme, sdate) + + +if nargin < 4 + mnth = 1; + yr = 1979; +else + mnth = sdate(1); + yr = sdate(2); +end + +tme = nj_varget(fname, 'time'); +tme_axis = nj_attget(fname, 'time', '_CoordinateAxisType'); + + +lon = nj_varget(fname, 'lon'); +lon_units = nj_attget(fname, 'lon', 'units'); +lon_name = nj_attget(fname, 'lon', 'long_name'); +lon_axis = nj_attget(fname, 'lon', '_CoordinateAxisType'); + +lat = nj_varget(fname, 'lat'); +lat_units = nj_attget(fname, 'lat', 'units'); +lat_name = nj_attget(fname, 'lat', 'long_name'); +lat_axis = nj_attget(fname, 'lat', '_CoordinateAxisType'); + +unit = nj_attget(fname, varname, 'units'); +name = nj_attget(fname, varname, 'long_name'); +mval = nj_attget(fname, varname, 'missing_value'); + + + +for i = 1:length(tme) + + tmp = nj_varget(fname, varname, [i 1 1], [1 inf inf]); + tmp = tmp'; + + data(1,:,:) = tmp; + + + tme_units = ['days since ', num2str(yr), '-', num2str(mnth), '-1 0']; + + if mnth < 10 + outnme_f = [outnme, num2str(yr), '0', num2str(mnth), '.nc']; + else + outnme_f = [outnme, num2str(yr), num2str(mnth), '.nc']; + end + + + ncid = netcdf.create(outnme_f, 'NC_WRITE'); + + time_dim_id = netcdf.defDim(ncid, 'time', 1); + lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); + lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + + time_var_id = netcdf.defVar(ncid, 'time', 'double', time_dim_id); + lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); + lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + + data_var_id = netcdf.defVar(ncid, varname, 'double', ... 
+ [lon_dim_id lat_dim_id time_dim_id]); + + + netcdf.endDef(ncid); + + netcdf.putVar(ncid, time_var_id, 0); + netcdf.putVar(ncid, lon_var_id, lon); + netcdf.putVar(ncid, lat_var_id, lat); + netcdf.putVar(ncid, data_var_id, data); + + netcdf.reDef(ncid) + netcdf.putAtt(ncid, time_var_id, 'units', tme_units); + netcdf.putAtt(ncid, time_var_id, '_CoordinateAxisType', tme_axis); + + + netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); + netcdf.putAtt(ncid, lon_var_id, 'long_name', lon_name); + netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); + + + netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); + netcdf.putAtt(ncid, lat_var_id, 'long_name', lat_name); + netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); + + + netcdf.putAtt(ncid, data_var_id, 'units', unit); + netcdf.putAtt(ncid, data_var_id, 'long_name', name); + netcdf.putAtt(ncid, data_var_id, 'missing_value', mval); + + netcdf.close(ncid); + + mnth = mnth + 1; + if mnth == 13 + mnth = 1; + yr = yr + 1; + end +end + + + + + + + + + \ No newline at end of file diff --git a/grid2gmt.m b/grid2gmt.m new file mode 100644 index 0000000..4290298 --- /dev/null +++ b/grid2gmt.m @@ -0,0 +1,51 @@ +function A = grid2gmt(A_old, n, Phi, Lambda) + +% grid2gmt.m transforms a grid field into a format, which is readable by +% GMT. The function either transforms the whole input field (default) or +% only a particular area, defined by the corners Theta_min, Theta_max, +% Lambda_min and Lambda_max +%-------------------------------------------------------------------------- +% Input: A_old [k x j] 180/n x 360/n matrix +% n [1 x 1] angular side length of a pixel +% (default: n = 0.5) +% lim [1 x 4] optional vector with four corners, +% i.e. Theta_min, Theta_max,Lambda_min and +% Lambda_max +% +% Output: A [h x 3] Matrix which can be safed as .txt file. +% The output is readable by GMT +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 8. Sep. 08 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +[r c] = size(A_old); +max_A = r*c; + +A = zeros(max_A, 3); + +A(:,3) = reshape(flipud(A_old), max_A, 1); + +if nargin < 2 + n = 0.5; +end + +if nargin < 3 + Phi = -90+n/2:n:90-n/2; + Lambda = -180+n/2:n:180; +end + +k1 = ones(1, length(Phi)); +k2 = ones(1, length(Lambda)); +Lambda_ = Lambda'*k1; +Phi_ = Phi'*k2; + +A(:,1) = reshape(Lambda_', max_A, 1); +A(:,2) = reshape(Phi_, max_A, 1); + + + + + diff --git a/gridfit.m b/gridfit.m new file mode 100644 index 0000000..d80d205 --- /dev/null +++ b/gridfit.m @@ -0,0 +1,1025 @@ +function [zgrid,xgrid,ygrid] = gridfit(x,y,z,xnodes,ynodes,varargin) +% gridfit: estimates a surface on a 2d grid, based on scattered data +% Replicates are allowed. All methods extrapolate to the grid +% boundaries. Gridfit uses a modified ridge estimator to +% generate the surface, where the bias is toward smoothness. +% +% Gridfit is not an interpolant. Its goal is a smooth surface +% that approximates your data, but allows you to control the +% amount of smoothing. 
+% +% usage #1: zgrid = gridfit(x,y,z,xnodes,ynodes); +% usage #2: [zgrid,xgrid,ygrid] = gridfit(x,y,z,xnodes,ynodes); +% usage #3: zgrid = gridfit(x,y,z,xnodes,ynodes,prop,val,prop,val,...); +% +% Arguments: (input) +% x,y,z - vectors of equal lengths, containing arbitrary scattered data +% The only constraint on x and y is they cannot ALL fall on a +% single line in the x-y plane. Replicate points will be treated +% in a least squares sense. +% +% ANY points containing a NaN are ignored in the estimation +% +% xnodes - vector defining the nodes in the grid in the independent +% variable (x). xnodes need not be equally spaced. xnodes +% must completely span the data. If they do not, then the +% 'extend' property is applied, adjusting the first and last +% nodes to be extended as necessary. See below for a complete +% description of the 'extend' property. +% +% If xnodes is a scalar integer, then it specifies the number +% of equally spaced nodes between the min and max of the data. +% +% ynodes - vector defining the nodes in the grid in the independent +% variable (y). ynodes need not be equally spaced. +% +% If ynodes is a scalar integer, then it specifies the number +% of equally spaced nodes between the min and max of the data. +% +% Also see the extend property. +% +% Additional arguments follow in the form of property/value pairs. +% Valid properties are: +% 'smoothness', 'interp', 'regularizer', 'solver', 'maxiter' +% 'extend', 'tilesize', 'overlap' +% +% Any UNAMBIGUOUS shortening (even down to a single letter) is +% valid for property names. All properties have default values, +% chosen (I hope) to give a reasonable result out of the box. +% +% 'smoothness' - scalar or vector of length 2 - determines the +% eventual smoothness of the estimated surface. A larger +% value here means the surface will be smoother. Smoothness +% must be a non-negative real number. +% +% If this parameter is a vector of length 2, then it defines +% the relative smoothing to be associated with the x and y +% variables. This allows the user to apply a different amount +% of smoothing in the x dimension compared to the y dimension. +% +% Note: the problem is normalized in advance so that a +% smoothness of 1 MAY generate reasonable results. If you +% find the result is too smooth, then use a smaller value +% for this parameter. Likewise, bumpy surfaces suggest use +% of a larger value. (Sometimes, use of an iterative solver +% with too small a limit on the maximum number of iterations +% will result in non-convergence.) +% +% DEFAULT: 1 +% +% +% 'interp' - character, denotes the interpolation scheme used +% to interpolate the data. +% +% DEFAULT: 'triangle' +% +% 'bilinear' - use bilinear interpolation within the grid +% (also known as tensor product linear interpolation) +% +% 'triangle' - split each cell in the grid into a triangle, +% then linear interpolation inside each triangle +% +% 'nearest' - nearest neighbor interpolation. This will +% rarely be a good choice, but I included it +% as an option for completeness. +% +% +% 'regularizer' - character flag, denotes the regularization +% paradignm to be used. There are currently three options. +% +% DEFAULT: 'gradient' +% +% 'diffusion' or 'laplacian' - uses a finite difference +% approximation to the Laplacian operator (i.e, del^2). +% +% We can think of the surface as a plate, wherein the +% bending rigidity of the plate is specified by the user +% as a number relative to the importance of fidelity to +% the data. 
A stiffer plate will result in a smoother +% surface overall, but fit the data less well. I've +% modeled a simple plate using the Laplacian, del^2. (A +% projected enhancement is to do a better job with the +% plate equations.) +% +% We can also view the regularizer as a diffusion problem, +% where the relative thermal conductivity is supplied. +% Here interpolation is seen as a problem of finding the +% steady temperature profile in an object, given a set of +% points held at a fixed temperature. Extrapolation will +% be linear. Both paradigms are appropriate for a Laplacian +% regularizer. +% +% 'gradient' - attempts to ensure the gradient is as smooth +% as possible everywhere. Its subtly different from the +% 'diffusion' option, in that here the directional +% derivatives are biased to be smooth across cell +% boundaries in the grid. +% +% The gradient option uncouples the terms in the Laplacian. +% Think of it as two coupled PDEs instead of one PDE. Why +% are they different at all? The terms in the Laplacian +% can balance each other. +% +% 'springs' - uses a spring model connecting nodes to each +% other, as well as connecting data points to the nodes +% in the grid. This choice will cause any extrapolation +% to be as constant as possible. +% +% Here the smoothing parameter is the relative stiffness +% of the springs connecting the nodes to each other compared +% to the stiffness of a spting connecting the lattice to +% each data point. Since all springs have a rest length +% (length at which the spring has zero potential energy) +% of zero, any extrapolation will be minimized. +% +% Note: The 'springs' regularizer tends to drag the surface +% towards the mean of all the data, so too large a smoothing +% parameter may be a problem. +% +% +% 'solver' - character flag - denotes the solver used for the +% resulting linear system. Different solvers will have +% different solution times depending upon the specific +% problem to be solved. Up to a certain size grid, the +% direct \ solver will often be speedy, until memory +% swaps causes problems. +% +% What solver should you use? Problems with a significant +% amount of extrapolation should avoid lsqr. \ may be +% best numerically for small smoothnesss parameters and +% high extents of extrapolation. +% +% Large numbers of points will slow down the direct +% \, but when applied to the normal equations, \ can be +% quite fast. Since the equations generated by these +% methods will tend to be well conditioned, the normal +% equations are not a bad choice of method to use. Beware +% when a small smoothing parameter is used, since this will +% make the equations less well conditioned. +% +% DEFAULT: 'normal' +% +% '\' - uses matlab's backslash operator to solve the sparse +% system. 'backslash' is an alternate name. +% +% 'symmlq' - uses matlab's iterative symmlq solver +% +% 'lsqr' - uses matlab's iterative lsqr solver +% +% 'normal' - uses \ to solve the normal equations. +% +% +% 'maxiter' - only applies to iterative solvers - defines the +% maximum number of iterations for an iterative solver +% +% DEFAULT: min(10000,length(xnodes)*length(ynodes)) +% +% +% 'extend' - character flag - controls whether the first and last +% nodes in each dimension are allowed to be adjusted to +% bound the data, and whether the user will be warned if +% this was deemed necessary to happen. +% +% DEFAULT: 'warning' +% +% 'warning' - Adjust the first and/or last node in +% x or y if the nodes do not FULLY contain +% the data. 
Issue a warning message to this +% effect, telling the amount of adjustment +% applied. +% +% 'never' - Issue an error message when the nodes do +% not absolutely contain the data. +% +% 'always' - automatically adjust the first and last +% nodes in each dimension if necessary. +% No warning is given when this option is set. +% +% +% 'tilesize' - grids which are simply too large to solve for +% in one single estimation step can be built as a set +% of tiles. For example, a 1000x1000 grid will require +% the estimation of 1e6 unknowns. This is likely to +% require more memory (and time) than you have available. +% But if your data is dense enough, then you can model +% it locally using smaller tiles of the grid. +% +% My recommendation for a reasonable tilesize is +% roughly 100 to 200. Tiles of this size take only +% a few seconds to solve normally, so the entire grid +% can be modeled in a finite amount of time. The minimum +% tilesize can never be less than 3, although even this +% size tile is so small as to be ridiculous. +% +% If your data is so sparse than some tiles contain +% insufficient data to model, then those tiles will +% be left as NaNs. +% +% DEFAULT: inf +% +% +% 'overlap' - Tiles in a grid have some overlap, so they +% can minimize any problems along the edge of a tile. +% In this overlapped region, the grid is built using a +% bi-linear combination of the overlapping tiles. +% +% The overlap is specified as a fraction of the tile +% size, so an overlap of 0.20 means there will be a 20% +% overlap of successive tiles. I do allow a zero overlap, +% but it must be no more than 1/2. +% +% 0 <= overlap <= 0.5 +% +% Overlap is ignored if the tilesize is greater than the +% number of nodes in both directions. +% +% DEFAULT: 0.20 +% +% +% 'autoscale' - Some data may have widely different scales on +% the respective x and y axes. If this happens, then +% the regularization may experience difficulties. +% +% autoscale = 'on' will cause gridfit to scale the x +% and y node intervals to a unit length. This should +% improve the regularization procedure. The scaling is +% purely internal. +% +% autoscale = 'off' will disable automatic scaling +% +% DEFAULT: 'on' +% +% +% Arguments: (output) +% zgrid - (nx,ny) array containing the fitted surface +% +% xgrid, ygrid - as returned by meshgrid(xnodes,ynodes) +% +% +% Speed considerations: +% Remember that gridfit must solve a LARGE system of linear +% equations. There will be as many unknowns as the total +% number of nodes in the final lattice. While these equations +% may be sparse, solving a system of 10000 equations may take +% a second or so. Very large problems may benefit from the +% iterative solvers or from tiling. +% +% +% Example usage: +% +% x = rand(100,1); +% y = rand(100,1); +% z = exp(x+2*y); +% xnodes = 0:.1:1; +% ynodes = 0:.1:1; +% +% g = gridfit(x,y,z,xnodes,ynodes); +% +% Note: this is equivalent to the following call: +% +% g = gridfit(x,y,z,xnodes,ynodes, ... +% 'smooth',1, ... +% 'interp','triangle', ... +% 'solver','normal', ... +% 'regularizer','gradient', ... +% 'extend','warning', ... 
+% 'tilesize',inf); +% +% +% Author: John D'Errico +% e-mail address: woodchips@rochester.rr.com +% Release: 2.0 +% Release date: 5/23/06 + +% set defaults +params.smoothness = 1; +params.interp = 'triangle'; +params.regularizer = 'gradient'; +params.solver = 'backslash'; +params.maxiter = []; +params.extend = 'warning'; +params.tilesize = inf; +params.overlap = 0.20; +params.mask = []; +params.autoscale = 'on'; +params.xscale = 1; +params.yscale = 1; + +% was the params struct supplied? +if ~isempty(varargin) + if isstruct(varargin{1}) + % params is only supplied if its a call from tiled_gridfit + params = varargin{1}; + if length(varargin)>1 + % check for any overrides + params = parse_pv_pairs(params,varargin{2:end}); + end + else + % check for any overrides of the defaults + params = parse_pv_pairs(params,varargin); + + end +end + +% check the parameters for acceptability +params = check_params(params); + +% ensure all of x,y,z,xnodes,ynodes are column vectors, +% also drop any NaN data +x=x(:); +y=y(:); +z=z(:); +k = isnan(x) | isnan(y) | isnan(z); +if any(k) + x(k)=[]; + y(k)=[]; + z(k)=[]; +end +xmin = min(x); +xmax = max(x); +ymin = min(y); +ymax = max(y); + +% did they supply a scalar for the nodes? +if length(xnodes)==1 + xnodes = linspace(xmin,xmax,xnodes)'; + xnodes(end) = xmax; % make sure it hits the max +end +if length(ynodes)==1 + ynodes = linspace(ymin,ymax,ynodes)'; + ynodes(end) = ymax; % make sure it hits the max +end + +xnodes=xnodes(:); +ynodes=ynodes(:); +dx = diff(xnodes); +dy = diff(ynodes); +nx = length(xnodes); +ny = length(ynodes); +ngrid = nx*ny; + +% set the scaling if autoscale was on +if strcmpi(params.autoscale,'on') + params.xscale = mean(dx); + params.yscale = mean(dy); + params.autoscale = 'off'; +end + +% check to see if any tiling is necessary +if (params.tilesize < max(nx,ny)) + % split it into smaller tiles. compute zgrid and ygrid + % at the very end if requested + zgrid = tiled_gridfit(x,y,z,xnodes,ynodes,params); +else + % its a single tile. + + % mask must be either an empty array, or a boolean + % aray of the same size as the final grid. + nmask = size(params.mask); + if ~isempty(params.mask) && ((nmask(2)~=nx) || (nmask(1)~=ny)) + if ((nmask(2)==ny) || (nmask(1)==nx)) + error 'Mask array is probably transposed from proper orientation.' + else + error 'Mask array must be the same size as the final grid.' + end + end + if ~isempty(params.mask) + params.maskflag = 1; + else + params.maskflag = 0; + end + + % default for maxiter? + if isempty(params.maxiter) + params.maxiter = min(10000,nx*ny); + end + + % check lengths of the data + n = length(x); + if (length(y)~=n) || (length(z)~=n) + error 'Data vectors are incompatible in size.' + end + if n<3 + error 'Insufficient data for surface estimation.' + end + + % verify the nodes are distinct + if any(diff(xnodes)<=0) || any(diff(ynodes)<=0) + error 'xnodes and ynodes must be monotone increasing' + end + + % do we need to tweak the first or last node in x or y? 
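+  % (Depending on params.extend, an out-of-range first/last node is moved
+  % silently ('always'), moved with a warning ('warning', the default), or
+  % triggers an error ('never'); see the 'extend' property above.)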
+  if xmin<xnodes(1)
+    switch params.extend
+      case 'always'
+        xnodes(1) = xmin;
+      case 'warning'
+        warning('GRIDFIT:extend',['xnodes(1) was decreased by: ',num2str(xnodes(1)-xmin),', new node = ',num2str(xmin)])
+        xnodes(1) = xmin;
+      case 'never'
+        error(['Some x (',num2str(xmin),') falls below xnodes(1) by: ',num2str(xnodes(1)-xmin)])
+    end
+  end
+  if xmax>xnodes(end)
+    switch params.extend
+      case 'always'
+        xnodes(end) = xmax;
+      case 'warning'
+        warning('GRIDFIT:extend',['xnodes(end) was increased by: ',num2str(xmax-xnodes(end)),', new node = ',num2str(xmax)])
+        xnodes(end) = xmax;
+      case 'never'
+        error(['Some x (',num2str(xmax),') falls above xnodes(end) by: ',num2str(xmax-xnodes(end))])
+    end
+  end
+  if ymin<ynodes(1)
+    switch params.extend
+      case 'always'
+        ynodes(1) = ymin;
+      case 'warning'
+        warning('GRIDFIT:extend',['ynodes(1) was decreased by: ',num2str(ynodes(1)-ymin),', new node = ',num2str(ymin)])
+        ynodes(1) = ymin;
+      case 'never'
+        error(['Some y (',num2str(ymin),') falls below ynodes(1) by: ',num2str(ynodes(1)-ymin)])
+    end
+  end
+  if ymax>ynodes(end)
+    switch params.extend
+      case 'always'
+        ynodes(end) = ymax;
+      case 'warning'
+        warning('GRIDFIT:extend',['ynodes(end) was increased by: ',num2str(ymax-ynodes(end)),', new node = ',num2str(ymax)])
+        ynodes(end) = ymax;
+      case 'never'
+        error(['Some y (',num2str(ymax),') falls above ynodes(end) by: ',num2str(ymax-ynodes(end))])
+    end
+  end
+
+  % determine which cell in the array each point lies in
+  [junk,indx] = histc(x,xnodes); %#ok
+  [junk,indy] = histc(y,ynodes); %#ok
+  % any point falling at the last node is taken to be
+  % inside the last cell in x or y.
+  k=(indx==nx);
+  indx(k)=indx(k)-1;
+  k=(indy==ny);
+  indy(k)=indy(k)-1;
+  ind = indy + ny*(indx-1);
+
+  % Do we have a mask to apply?
+  if params.maskflag
+    % if we do, then we need to ensure that every
+    % cell with at least one data point also has at
+    % least all of its corners unmasked.
+    params.mask(ind) = 1;
+    params.mask(ind+1) = 1;
+    params.mask(ind+ny) = 1;
+    params.mask(ind+ny+1) = 1;
+  end
+
+  % interpolation equations for each point
+  tx = min(1,max(0,(x - xnodes(indx))./dx(indx)));
+  ty = min(1,max(0,(y - ynodes(indy))./dy(indy)));
+  % Future enhancement: add cubic interpolant
+  switch params.interp
+    case 'triangle'
+      % linear interpolation inside each triangle
+      k = (tx > ty);
+      L = ones(n,1);
+      L(k) = ny;
+
+      t1 = min(tx,ty);
+      t2 = max(tx,ty);
+      A = sparse(repmat((1:n)',1,3),[ind,ind+ny+1,ind+L], ...
+        [1-t2,t1,t2-t1],n,ngrid);
+
+    case 'nearest'
+      % nearest neighbor interpolation in a cell
+      k = round(1-ty) + round(1-tx)*ny;
+      A = sparse((1:n)',ind+k,ones(n,1),n,ngrid);
+
+    case 'bilinear'
+      % bilinear interpolation in a cell
+      A = sparse(repmat((1:n)',1,4),[ind,ind+1,ind+ny,ind+ny+1], ...
+        [(1-tx).*(1-ty), (1-tx).*ty, tx.*(1-ty), tx.*ty], ...
+        n,ngrid);
+
+  end
+  rhs = z;
+
+  % do we have relative smoothing parameters?
+  if numel(params.smoothness) == 1
+    % it was scalar, so treat both dimensions equally
+    smoothparam = params.smoothness;
+    xyRelativeStiffness = [1;1];
+  else
+    % It was a vector, so anisotropy reigns.
+    % I've already checked that the vector was of length 2
+    smoothparam = sqrt(prod(params.smoothness));
+    xyRelativeStiffness = params.smoothness(:)./smoothparam;
+  end
+
+  % Build regularizer. Add del^4 regularizer one day.
+  switch params.regularizer
+    case 'springs'
+      % zero "rest length" springs
+      [i,j] = meshgrid(1:nx,1:(ny-1));
+      ind = j(:) + ny*(i(:)-1);
+      m = nx*(ny-1);
+      stiffness = 1./(dy/params.yscale);
+      Areg = sparse(repmat((1:m)',1,2),[ind,ind+1], ...
+        xyRelativeStiffness(2)*stiffness(j(:))*[-1 1], ...
+        m,ngrid);
+
+      [i,j] = meshgrid(1:(nx-1),1:ny);
+      ind = j(:) + ny*(i(:)-1);
+      m = (nx-1)*ny;
+      stiffness = 1./(dx/params.xscale);
+      Areg = [Areg;sparse(repmat((1:m)',1,2),[ind,ind+ny], ...
+        xyRelativeStiffness(1)*stiffness(i(:))*[-1 1],m,ngrid)];
+
+      [i,j] = meshgrid(1:(nx-1),1:(ny-1));
+      ind = j(:) + ny*(i(:)-1);
+      m = (nx-1)*(ny-1);
+      stiffness = 1./sqrt((dx(i(:))/params.xscale/xyRelativeStiffness(1)).^2 + ...
+        (dy(j(:))/params.yscale/xyRelativeStiffness(2)).^2);
+
+      Areg = [Areg;sparse(repmat((1:m)',1,2),[ind,ind+ny+1], ...
+ stiffness*[-1 1],m,ngrid)]; + + Areg = [Areg;sparse(repmat((1:m)',1,2),[ind+1,ind+ny], ... + stiffness*[-1 1],m,ngrid)]; + + case {'diffusion' 'laplacian'} + % thermal diffusion using Laplacian (del^2) + [i,j] = meshgrid(1:nx,2:(ny-1)); + ind = j(:) + ny*(i(:)-1); + dy1 = dy(j(:)-1)/params.yscale; + dy2 = dy(j(:))/params.yscale; + + Areg = sparse(repmat(ind,1,3),[ind-1,ind,ind+1], ... + xyRelativeStiffness(2)*[-2./(dy1.*(dy1+dy2)), ... + 2./(dy1.*dy2), -2./(dy2.*(dy1+dy2))],ngrid,ngrid); + + [i,j] = meshgrid(2:(nx-1),1:ny); + ind = j(:) + ny*(i(:)-1); + dx1 = dx(i(:)-1)/params.xscale; + dx2 = dx(i(:))/params.xscale; + + Areg = Areg + sparse(repmat(ind,1,3),[ind-ny,ind,ind+ny], ... + xyRelativeStiffness(1)*[-2./(dx1.*(dx1+dx2)), ... + 2./(dx1.*dx2), -2./(dx2.*(dx1+dx2))],ngrid,ngrid); + + case 'gradient' + % Subtly different from the Laplacian. A point for future + % enhancement is to do it better for the triangle interpolation + % case. + [i,j] = meshgrid(1:nx,2:(ny-1)); + ind = j(:) + ny*(i(:)-1); + dy1 = dy(j(:)-1)/params.yscale; + dy2 = dy(j(:))/params.yscale; + + Areg = sparse(repmat(ind,1,3),[ind-1,ind,ind+1], ... + xyRelativeStiffness(2)*[-2./(dy1.*(dy1+dy2)), ... + 2./(dy1.*dy2), -2./(dy2.*(dy1+dy2))],ngrid,ngrid); + + [i,j] = meshgrid(2:(nx-1),1:ny); + ind = j(:) + ny*(i(:)-1); + dx1 = dx(i(:)-1)/params.xscale; + dx2 = dx(i(:))/params.xscale; + + Areg = [Areg;sparse(repmat(ind,1,3),[ind-ny,ind,ind+ny], ... + xyRelativeStiffness(1)*[-2./(dx1.*(dx1+dx2)), ... + 2./(dx1.*dx2), -2./(dx2.*(dx1+dx2))],ngrid,ngrid)]; + + end + nreg = size(Areg,1); + + % Append the regularizer to the interpolation equations, + % scaling the problem first. Use the 1-norm for speed. + NA = norm(A,1); + NR = norm(Areg,1); + A = [A;Areg*(smoothparam*NA/NR)]; + rhs = [rhs;zeros(nreg,1)]; + % do we have a mask to apply? + if params.maskflag + unmasked = find(params.mask); + end + % solve the full system, with regularizer attached + switch params.solver + case {'\' 'backslash'} + if params.maskflag + % there is a mask to use + zgrid=nan(ny,nx); + zgrid(unmasked) = A(:,unmasked)\rhs; + else + % no mask + zgrid = reshape(A\rhs,ny,nx); + end + + case 'normal' + % The normal equations, solved with \. Can be faster + % for huge numbers of data points, but reasonably + % sized grids. The regularizer makes A well conditioned + % so the normal equations are not a terribly bad thing + % here. + if params.maskflag + % there is a mask to use + Aunmasked = A(:,unmasked); + zgrid=nan(ny,nx); + zgrid(unmasked) = (Aunmasked'*Aunmasked)\(Aunmasked'*rhs); + else + zgrid = reshape((A'*A)\(A'*rhs),ny,nx); + end + + case 'symmlq' + % iterative solver - symmlq - requires a symmetric matrix, + % so use it to solve the normal equations. No preconditioner. + tol = abs(max(z)-min(z))*1.e-13; + if params.maskflag + % there is a mask to use + zgrid=nan(ny,nx); + [zgrid(unmasked),flag] = symmlq(A(:,unmasked)'*A(:,unmasked), ... + A(:,unmasked)'*rhs,tol,params.maxiter); + else + [zgrid,flag] = symmlq(A'*A,A'*rhs,tol,params.maxiter); + zgrid = reshape(zgrid,ny,nx); + end + % display a warning if convergence problems + switch flag + case 0 + % no problems with convergence + case 1 + % SYMMLQ iterated MAXIT times but did not converge. + warning('GRIDFIT:solver',['Symmlq performed ',num2str(params.maxiter), ... 
+ ' iterations but did not converge.']) + case 3 + % SYMMLQ stagnated, successive iterates were the same + warning('GRIDFIT:solver','Symmlq stagnated without apparent convergence.') + otherwise + warning('GRIDFIT:solver',['One of the scalar quantities calculated in',... + ' symmlq was too small or too large to continue computing.']) + end + + case 'lsqr' + % iterative solver - lsqr. No preconditioner here. + tol = abs(max(z)-min(z))*1.e-13; + if params.maskflag + % there is a mask to use + zgrid=nan(ny,nx); + [zgrid(unmasked),flag] = lsqr(A(:,unmasked),rhs,tol,params.maxiter); + else + [zgrid,flag] = lsqr(A,rhs,tol,params.maxiter); + zgrid = reshape(zgrid,ny,nx); + end + + % display a warning if convergence problems + switch flag + case 0 + % no problems with convergence + case 1 + % lsqr iterated MAXIT times but did not converge. + warning('GRIDFIT:solver',['Lsqr performed ', ... + num2str(params.maxiter),' iterations but did not converge.']) + case 3 + % lsqr stagnated, successive iterates were the same + warning('GRIDFIT:solver','Lsqr stagnated without apparent convergence.') + case 4 + warning('GRIDFIT:solver',['One of the scalar quantities calculated in',... + ' LSQR was too small or too large to continue computing.']) + end + + end % switch params.solver + +end % if params.tilesize... + +% only generate xgrid and ygrid if requested. +if nargout>1 + [xgrid,ygrid]=meshgrid(xnodes,ynodes); +end + +% ============================================ +% End of main function - gridfit +% ============================================ + +% ============================================ +% subfunction - parse_pv_pairs +% ============================================ +function params=parse_pv_pairs(params,pv_pairs) +% parse_pv_pairs: parses sets of property value pairs, allows defaults +% usage: params=parse_pv_pairs(default_params,pv_pairs) +% +% arguments: (input) +% default_params - structure, with one field for every potential +% property/value pair. Each field will contain the default +% value for that property. If no default is supplied for a +% given property, then that field must be empty. +% +% pv_array - cell array of property/value pairs. +% Case is ignored when comparing properties to the list +% of field names. Also, any unambiguous shortening of a +% field/property name is allowed. +% +% arguments: (output) +% params - parameter struct that reflects any updated property/value +% pairs in the pv_array. +% +% Example usage: +% First, set default values for the parameters. Assume we +% have four parameters that we wish to use optionally in +% the function examplefun. +% +% - 'viscosity', which will have a default value of 1 +% - 'volume', which will default to 1 +% - 'pie' - which will have default value 3.141592653589793 +% - 'description' - a text field, left empty by default +% +% The first argument to examplefun is one which will always be +% supplied. +% +% function examplefun(dummyarg1,varargin) +% params.Viscosity = 1; +% params.Volume = 1; +% params.Pie = 3.141592653589793 +% +% params.Description = ''; +% params=parse_pv_pairs(params,varargin); +% params +% +% Use examplefun, overriding the defaults for 'pie', 'viscosity' +% and 'description'. The 'volume' parameter is left at its default. +% +% examplefun(rand(10),'vis',10,'pie',3,'Description','Hello world') +% +% params = +% Viscosity: 10 +% Volume: 1 +% Pie: 3 +% Description: 'Hello world' +% +% Note that capitalization was ignored, and the property 'viscosity' +% was truncated as supplied. 
Also note that the order the pairs were +% supplied was arbitrary. + +npv = length(pv_pairs); +n = npv/2; + +if n~=floor(n) + error 'Property/value pairs must come in PAIRS.' +end +if n<=0 + % just return the defaults + return +end + +if ~isstruct(params) + error 'No structure for defaults was supplied' +end + +% there was at least one pv pair. process any supplied +propnames = fieldnames(params); +lpropnames = lower(propnames); +for i=1:n + p_i = lower(pv_pairs{2*i-1}); + v_i = pv_pairs{2*i}; + + ind = strmatch(p_i,lpropnames,'exact'); + if isempty(ind) + ind = find(strncmp(p_i,lpropnames,length(p_i))); + if isempty(ind) + error(['No matching property found for: ',pv_pairs{2*i-1}]) + elseif length(ind)>1 + error(['Ambiguous property name: ',pv_pairs{2*i-1}]) + end + end + p_i = propnames{ind}; + + % override the corresponding default in params + params = setfield(params,p_i,v_i); %#ok + +end + + +% ============================================ +% subfunction - check_params +% ============================================ +function params = check_params(params) + +% check the parameters for acceptability +% smoothness == 1 by default +if isempty(params.smoothness) + params.smoothness = 1; +else + if (numel(params.smoothness)>2) || any(params.smoothness<=0) + error 'Smoothness must be scalar (or length 2 vector), real, finite, and positive.' + end +end + +% regularizer - must be one of 4 options - the second and +% third are actually synonyms. +valid = {'springs', 'diffusion', 'laplacian', 'gradient'}; +if isempty(params.regularizer) + params.regularizer = 'diffusion'; +end +ind = find(strncmpi(params.regularizer,valid,length(params.regularizer))); +if (length(ind)==1) + params.regularizer = valid{ind}; +else + error(['Invalid regularization method: ',params.regularizer]) +end + +% interp must be one of: +% 'bilinear', 'nearest', or 'triangle' +% but accept any shortening thereof. +valid = {'bilinear', 'nearest', 'triangle'}; +if isempty(params.interp) + params.interp = 'triangle'; +end +ind = find(strncmpi(params.interp,valid,length(params.interp))); +if (length(ind)==1) + params.interp = valid{ind}; +else + error(['Invalid interpolation method: ',params.interp]) +end + +% solver must be one of: +% 'backslash', '\', 'symmlq', 'lsqr', or 'normal' +% but accept any shortening thereof. +valid = {'backslash', '\', 'symmlq', 'lsqr', 'normal'}; +if isempty(params.solver) + params.solver = '\'; +end +ind = find(strncmpi(params.solver,valid,length(params.solver))); +if (length(ind)==1) + params.solver = valid{ind}; +else + error(['Invalid solver option: ',params.solver]) +end + +% extend must be one of: +% 'never', 'warning', 'always' +% but accept any shortening thereof. +valid = {'never', 'warning', 'always'}; +if isempty(params.extend) + params.extend = 'warning'; +end +ind = find(strncmpi(params.extend,valid,length(params.extend))); +if (length(ind)==1) + params.extend = valid{ind}; +else + error(['Invalid extend option: ',params.extend]) +end + +% tilesize == inf by default +if isempty(params.tilesize) + params.tilesize = inf; +elseif (length(params.tilesize)>1) || (params.tilesize<3) + error 'Tilesize must be scalar and > 0.' +end + +% overlap == 0.20 by default +if isempty(params.overlap) + params.overlap = 0.20; +elseif (length(params.overlap)>1) || (params.overlap<0) || (params.overlap>0.5) + error 'Overlap must be scalar and 0 < overlap < 1.' 
+end + +% ============================================ +% subfunction - tiled_gridfit +% ============================================ +function zgrid=tiled_gridfit(x,y,z,xnodes,ynodes,params) +% tiled_gridfit: a tiled version of gridfit, continuous across tile boundaries +% usage: [zgrid,xgrid,ygrid]=tiled_gridfit(x,y,z,xnodes,ynodes,params) +% +% Tiled_gridfit is used when the total grid is far too large +% to model using a single call to gridfit. While gridfit may take +% only a second or so to build a 100x100 grid, a 2000x2000 grid +% will probably not run at all due to memory problems. +% +% Tiles in the grid with insufficient data (<4 points) will be +% filled with NaNs. Avoid use of too small tiles, especially +% if your data has holes in it that may encompass an entire tile. +% +% A mask may also be applied, in which case tiled_gridfit will +% subdivide the mask into tiles. Note that any boolean mask +% provided is assumed to be the size of the complete grid. +% +% Tiled_gridfit may not be fast on huge grids, but it should run +% as long as you use a reasonable tilesize. 8-) + +% Note that we have already verified all parameters in check_params + +% Matrix elements in a square tile +tilesize = params.tilesize; +% Size of overlap in terms of matrix elements. Overlaps +% of purely zero cause problems, so force at least two +% elements to overlap. +overlap = max(2,floor(tilesize*params.overlap)); + +% reset the tilesize for each particular tile to be inf, so +% we will never see a recursive call to tiled_gridfit +Tparams = params; +Tparams.tilesize = inf; + +nx = length(xnodes); +ny = length(ynodes); +zgrid = zeros(ny,nx); + +% linear ramp for the bilinear interpolation +rampfun = inline('(t-t(1))/(t(end)-t(1))','t'); + +% loop over each tile in the grid +h = waitbar(0,'Relax and have a cup of JAVA. Its my treat.'); +warncount = 0; +xtind = 1:min(nx,tilesize); +while ~isempty(xtind) && (xtind(1)<=nx) + + xinterp = ones(1,length(xtind)); + if (xtind(1) ~= 1) + xinterp(1:overlap) = rampfun(xnodes(xtind(1:overlap))); + end + if (xtind(end) ~= nx) + xinterp((end-overlap+1):end) = 1-rampfun(xnodes(xtind((end-overlap+1):end))); + end + + ytind = 1:min(ny,tilesize); + while ~isempty(ytind) && (ytind(1)<=ny) + % update the waitbar + waitbar((xtind(end)-tilesize)/nx + tilesize*ytind(end)/ny/nx) + + yinterp = ones(length(ytind),1); + if (ytind(1) ~= 1) + yinterp(1:overlap) = rampfun(ynodes(ytind(1:overlap))); + end + if (ytind(end) ~= ny) + yinterp((end-overlap+1):end) = 1-rampfun(ynodes(ytind((end-overlap+1):end))); + end + + % was a mask supplied? + if ~isempty(params.mask) + submask = params.mask(ytind,xtind); + Tparams.mask = submask; + end + + % extract data that lies in this grid tile + k = (x>=xnodes(xtind(1))) & (x<=xnodes(xtind(end))) & ... + (y>=ynodes(ytind(1))) & (y<=ynodes(ytind(end))); + k = find(k); + + if length(k)<4 + if warncount == 0 + warning('GRIDFIT:tiling','A tile was too underpopulated to model. 
Filled with NaNs.') + end + warncount = warncount + 1; + + % fill this part of the grid with NaNs + zgrid(ytind,xtind) = NaN; + + else + % build this tile + zgtile = gridfit(x(k),y(k),z(k),xnodes(xtind),ynodes(ytind),Tparams); + + % bilinear interpolation (using an outer product) + interp_coef = yinterp*xinterp; + + % accumulate the tile into the complete grid + zgrid(ytind,xtind) = zgrid(ytind,xtind) + zgtile.*interp_coef; + + end + + % step to the next tile in y + if ytind(end)=ny + % extend this tile to the edge + ytind = ytind(1):ny; + end + else + ytind = ny+1; + end + + end % while loop over y + + % step to the next tile in x + if xtind(end)=nx + % extend this tile to the edge + xtind = xtind(1):nx; + end + else + xtind = nx+1; + end + +end % while loop over x + +% close down the waitbar +close(h) + +if warncount>0 + warning('GRIDFIT:tiling',[num2str(warncount),' tiles were underpopulated & filled with NaNs']) +end + + + diff --git a/harm_ana.m b/harm_ana.m new file mode 100644 index 0000000..dac78a1 --- /dev/null +++ b/harm_ana.m @@ -0,0 +1,80 @@ +function [R, Phi] = harm_ana(inpt, thrsh); + + +nts = length(inpt); +[rws, cls] = size(inpt{1}); + +for i = 1:nts + F(i,:) = inpt{i}(:)'; +end + +mn = mean(F, 1); + +F = F - ones(nts, 1)*mn; + +t = (0:nts-1)'; + +A = [ones(nts, 1) t cos(pi/6*t) sin(pi/6*t) cos(pi/3*t) sin(pi/3*t)]; + +for i = 1:size(F, 2) + x(:,i) = inv(A'*A)*A'*F(:, i); +end + +a1 = x(3,:); +b1 = x(4,:); + +a2 = x(5,:); +b2 = x(6,:); + +r1(1,:) = sqrt(a1.^2 + b1.^2); +r1(2,:) = r1(1,:).^2/2; +r1(3,:) = r1(1,:).^2./(2*var(F)); + +r2(1,:) = sqrt(a2.^2 + b2.^2); +r2(2,:) = r2(1,:).^2/2; +r2(3,:) = r2(1,:).^2./(2*var(F)); + +phi1 = atan2(b1, a1); +phi2 = atan2(b2, a2); + +phi1(phi1 < 0) = pi + (pi + phi1(phi1<0)); +phi2(phi2 < 0) = pi + (pi + phi2(phi2<0)); + +phi1 = phi1 + pi/2; +phi1(phi1 > 2*pi) = phi1(phi1 > 2*pi) - 2*pi; + +phi2 = phi2 + pi/2; +phi2(phi2 > 2*pi) = phi2(phi2 > 2*pi) - 2*pi; + +R{1,1} = reshape(r1(1,:), rws, cls); +R{1,2} = reshape(r1(2,:), rws, cls); +R{1,3} = reshape(r1(3,:), rws, cls); + +R{2,1} = reshape(r2(1,:), rws, cls); +R{2,2} = reshape(r2(2,:), rws, cls); +R{2,3} = reshape(r2(3,:), rws, cls); + +Phi{1,1} = reshape(phi1, rws, cls); +Phi{2,1} = reshape(phi2, rws, cls); + +Phi{1,1}(R{1,3} < thrsh) = NaN; +Phi{2,1}(R{2,3} < thrsh) = NaN; + + +for i = 1:3 + R{1,i}(R{1,3} < thrsh) = NaN; + R{2,i}(R{2,3} < thrsh) = NaN; +end + + + + + + + + + + + + + diff --git a/haversine.m b/haversine.m new file mode 100644 index 0000000..6c5bebf --- /dev/null +++ b/haversine.m @@ -0,0 +1,10 @@ +function s = haversine(theta1, theta2, dlambda) + +R = 6378137; + +dlat = abs(theta1 - theta2); + +a = sin(dlat/2)^2 + cos(theta1)*cos(theta2)*sin(dlambda/2)^2; +c = 2*atan2(sqrt(a), sqrt(1-a)); +s = R*c; + diff --git a/inregion.m b/inregion.m new file mode 100644 index 0000000..1abdc9b --- /dev/null +++ b/inregion.m @@ -0,0 +1,149 @@ +function [in, on] = inregion(x,y,xv,yv) +%INREGION True for points inside or on a polygonal region. +% IN = INREGION(X, Y, XV, YV) returns a matrix IN the size of X and Y. +% IN(p,q) = 1 if the point (X(p,q), Y(p,q)) is either strictly inside or +% on the edge of the polygonal region whose vertices are specified by the +% vectors XV and YV; otherwise IN(p,q) = 0. +% +% [IN ON] = INREGION returns a second matrix, ON, which is the size of X +% and Y. ON(p,q) = 1 if the point (X(p,q), Y(p,q)) is on the edge of the +% polygonal region; otherwise ON(p,q) = 0. 
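+%
+% (Points strictly in the interior are therefore those with IN = 1 and
+% ON = 0; the example below picks them out with find(in-on).)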
+% +% INREGION is a modification of INPOLYGON that uses a roundoff error +% compensating tolerance in the cross product sign test. +% +% Example: +% xv = [-3 -3 1 1 3 1 -1 -1 -3]; +% yv = [-3 -1 3 1 1 -1 -1 -3 -3]; +% [x,y] = meshgrid(-3:1/2:3); +% [in,on] = inregion(x,y,xv,yv); +% p = find(in-on); +% q = find(on); +% plot(xv,yv,'-',x(p),y(p),'ko',x(q),y(q),'ro') +% axis([-5 5 -4 4]) + +% If (xv,yv) is not closed, close it. +xv = xv(:); +yv = yv(:); +Nv = length(xv); +if ((xv(1) ~= xv(Nv)) | (yv(1) ~= yv(Nv))) + xv = [xv ; xv(1)]; + yv = [yv ; yv(1)]; + Nv = Nv + 1; +end + +inputSize = size(x); + +x = x(:).'; +y = y(:).'; + +mask = (x >= min(xv)) & (x <= max(xv)) & (y>=min(yv)) & (y<=max(yv)); +if ~any(mask) + in = zeros(inputSize); + on = in; + return +end +inbounds = find(mask); +x = x(mask); +y = y(mask); + + +% Choose block_length to keep memory usage of vec_inpolygon around +% 10 Megabytes. +block_length = 1e5; + +M = prod(size(x)); + +if M*Nv < block_length + if nargout > 1 + [in on] = vec_inpolygon(Nv,x,y,xv,yv); + else + in = vec_inpolygon(Nv,x,y,xv,yv); + end +else + % Process at most N elements at a time + N = ceil(block_length/Nv); + in = false(1,M); + if nargout > 1 + on = false(1,M); + end + n1 = 0; n2 = 0; + while n2 < M, + n1 = n2+1; + n2 = n1+N; + if n2 > M, + n2 = M; + end + if nargout > 1 + [in(n1:n2) on(n1:n2)] = vec_inpolygon(Nv,x(n1:n2),y(n1:n2),xv,yv); + else + in(n1:n2) = vec_inpolygon(Nv,x(n1:n2),y(n1:n2),xv,yv); + end + end +end + +if nargout > 1 + onmask = mask; + onmask(inbounds(~on)) = 0; + on = reshape(onmask, inputSize); +end + +mask(inbounds(~in)) = 0; +% Reshape output matrix. +in = reshape(mask, inputSize); + + +%---------------------------------------------- +function [in, on] = vec_inpolygon(Nv,x,y,xv,yv) +% vectorize the computation. + +% Translate the vertices so that the test points are +% at the origin. + +Np = length(x); +x = x(ones(Nv,1),:); +y = y(ones(Nv,1),:); +xv = xv(:,ones(1,Np)) - x; +yv = yv(:,ones(1,Np)) - y; + +% Compute the quadrant number for the vertices relative +% to the test points. +posX = xv > 0; +posY = yv > 0; +negX = ~posX; +negY = ~posY; +quad = (negX & posY) + 2*(negX & negY) + ... + 3*(posX & negY); + +% Compute the sign() of the cross product and dot product +% of adjacent vertices. +% Modified 09/17/03 to use a tolerance in the cross product sign test. +m = 1:Nv-1; +mp1 = 2:Nv; +crossProduct = xv(m,:) .* yv(mp1,:) - xv(mp1,:) .* yv(m,:); +tol = 10*Nv*(max(abs(xv(:)))+max(abs(yv(:))))*eps; +crossProduct(abs(crossProduct)n , fprintf('jade -> Do not ask more sources than sensors here!!!\n'), return,end +if verbose, fprintf('jade -> Looking for %d sources\n',m); end ; + + +% to do: add a warning about complex signals + +% Mean removal +%============= +if remmn == true + if verbose, fprintf('jade -> Removing the mean value\n'); end + X = X - mean(X')' * ones(1,T); +end + +%%% whitening & projection onto signal subspace +% =========================================== +if verbose, fprintf('jade -> Whitening the data\n'); end + if m == T | m == 0 + [tmp, Ds, U] = svd(X'/sqrt(T), 'econ'); + elseif m == n + [tmp, Ds, U] = svd(X'/sqrt(T)); + else + [tmp, Ds, U] = svds(X'/sqrt(T), m); + end + + Ds = diag(Ds.^2); + B = U'; + scales = sqrt(Ds); + B = diag(1./scales)*B; + + +% [U,D] = eig((X*X')/T) ; %% An eigen basis for the sample covariance matrix +% [Ds,k] = sort(diag(D)) ; %% Sort by increasing variances +% PCs = n:-1:n-m+1 ; %% The m most significant princip. comp. 
by decreasing variance +% +% %% --- PCA ---------------------------------------------------------- +% B = U(:,k(PCs))' ; % At this stage, B does the PCA on m components +% +% %% --- Scaling ------------------------------------------------------ +% scales = sqrt(Ds(PCs)) ; % The scales of the principal components . +% B = diag(1./scales)*B ; % Now, B does PCA followed by a rescaling = sphering + + +%% --- Sphering ------------------------------------------------------ +X = B*X; %% We have done the easy part: B is a whitening matrix and X is white. + +clear U D Ds k PCs scales ; + +%%% NOTE: At this stage, X is a PCA analysis in m components of the real data, except that +%%% all its entries now have unit variance. Any further rotation of X will preserve the +%%% property that X is a vector of uncorrelated components. It remains to find the +%%% rotation matrix such that the entries of X are not only uncorrelated but also `as +%%% independent as possible'. This independence is measured by correlations of order +%%% higher than 2. We have defined such a measure of independence which +%%% 1) is a reasonable approximation of the mutual information +%%% 2) can be optimized by a `fast algorithm' +%%% This measure of independence also corresponds to the `diagonality' of a set of +%%% cumulant matrices. The code below finds the `missing rotation ' as the matrix which +%%% best diagonalizes a particular set of cumulant matrices. + + +%%% Estimation of the cumulant matrices. +% ==================================== +if verbose, fprintf('jade -> Estimating cumulant matrices\n'); end + +%% Reshaping of the data, hoping to speed up things a little bit... +X = X'; + +dimsymm = (m*(m+1))/2; % Dim. of the space of real symm matrices +nbcm = dimsymm ; % number of cumulant matrices +CM = zeros(m,m*nbcm); % Storage for cumulant matrices +R = eye(m); %% +Qij = zeros(m); % Temp for a cum. matrix +Xim = zeros(m,1); % Temp +Xijm = zeros(m,1); % Temp +Uns = ones(1,m); % for convenience + + +%% I am using a symmetry trick to save storage. I should write a short note one of these +%% days explaining what is going on here. +%% +Range = 1:m ; % will index the columns of CM where to store the cum. mats. + +for im = 1:m + Xim = X(:,im) ; + Xijm= Xim.*Xim ; + %% Note to myself: the -R on next line can be removed: it does not affect + %% the joint diagonalization criterion + Qij = ((Xijm(:,Uns).*X)' * X)/T - R - 2 * R(:,im)*R(:,im)' ; + CM(:,Range) = Qij ; + Range = Range + m ; + for jm = 1:im-1 + Xijm = Xim.*X(:,jm) ; + Qij = sqrt(2) *(((Xijm(:,Uns).*X)' * X)/T - R(:,im)*R(:,jm)' - R(:,jm)*R(:,im)') ; + CM(:,Range) = Qij ; + Range = Range + m ; + end ; +end; +%%%% Now we have nbcm = m(m+1)/2 cumulants matrices stored in a big m x m*nbcm array. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% The inefficient code below does the same as above: computing the big CM cumulant matrix. +%% It is commented out but you can check that it produces the same result. +%% This is supposed to help people understand the (rather obscure) code above. +%% See section 4.2 of the Neural Comp paper referenced below. 
It can be found at +%% "http://www.tsi.enst.fr/~cardoso/Papers.PS/neuralcomp_2ppf.ps", +%% +%% +%% +%% if 1, +%% +%% %% Step one: we compute the sample cumulants +%% Matcum = zeros(m,m,m,m) ; +%% for i1=1:m, +%% for i2=1:m, +%% for i3=1:m, +%% for i4=1:m, +%% Matcum(i1,i2,i3,i4) = mean( X(:,i1) .* X(:,i2) .* X(:,i3) .* X(:,i4) ) ... +%% - R(i1,i2)*R(i3,i4) ... +%% - R(i1,i3)*R(i2,i4) ... +%% - R(i1,i4)*R(i2,i3) ; +%% end +%% end +%% end +%% end +%% +%% %% Step 2; We compute a basis of the space of symmetric m*m matrices +%% CMM = zeros(m, m, nbcm) ; %% Holds the basis. +%% icm = 0 ; %% index to the elements of the basis +%% vi = zeros(m,1); %% the ith basis vetor of R^m +%% vj = zeros(m,1); %% the jth basis vetor of R^m +%% Id = eye (m) ; %% convenience +%% for im=1:m, +%% vi = Id(:,im) ; +%% icm = icm + 1 ; +%% CMM(:, :, icm) = vi*vi' ; +%% for jm=1:im-1, +%% vj = Id(:,jm) ; +%% icm = icm + 1 ; +%% CMM(:, :, icm) = sqrt(0.5) * (vi*vj'+vj*vi') ; +%% end +%% end +%% %% Now CMM(:,:,i) is the ith element of an orthonormal basis for_ the space of m*m symmetric matrices +%% +%% %% Step 3. We compute the image of each basis element by the cumulant tensor and store it back into CMM. +%% mat = zeros(m) ; %% tmp +%% for icm=1:nbcm +%% mat = squeeze(CMM(:,:,icm)) ; +%% for i1=1:m +%% for i2=1:m +%% CMM(i1, i2, icm) = sum(sum(squeeze(Matcum(i1,i2,:,:)) .* mat )) ; +%% end +%% end +%% end; +%% %% This is doing something like \sum_kl [ Cum(xi,xj,xk,xl) * mat_kl ] +%% +%% %% Step 4. Now, we can check that CMM and CM are equivalent +%% Range = 1:m ; +%% for icm=1:nbcm, +%% M1 = squeeze( CMM(:,:,icm)) ; +%% M2 = CM(:,Range) ; +%% Range = Range + m ; +%% norm (M1-M2, 'fro' ) , %% This should be a numerical zero. +%% end; +%% +%% end; %% End of the demo code for the computation of cumulant matrices +%% +%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + + + +%%% joint diagonalization of the cumulant matrices +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + +%% Init +if 0, %% Init by diagonalizing a *single* cumulant matrix. It seems to save + %% some computation time `sometimes'. Not clear if initialization is really worth + %% it since Jacobi rotations are very efficient. On the other hand, it does not + %% cost much... + + if verbose, fprintf('jade -> Initialization of the diagonalization\n'); end + [V,D] = eig(CM(:,1:m)); % Selectng a particular cumulant matrix. 
+ for u=1:m:m*nbcm, % Accordingly updating the cumulant set given the init + CM(:,u:u+m-1) = CM(:,u:u+m-1)*V ; + end; + CM = V'*CM; + +else, %% The dont-try-to-be-smart init + V = eye(m) ; % la rotation initiale +end; + +%% Computing the initial value of the contrast +Diag = zeros(m,1) ; +On = 0 ; +Range = 1:m ; +for im = 1:nbcm, + Diag = diag(CM(:,Range)) ; + On = On + sum(Diag.*Diag) ; + Range = Range + m ; +end +Off = sum(sum(CM.*CM)) - On ; + + + +seuil = 1.0e-6 / sqrt(T) ; % A statistically scaled threshold on `small' angles +encore = 1; +sweep = 0; % sweep number +updates = 0; % Total number of rotations +upds = 0; % Number of rotations in a given seep +g = zeros(2,nbcm); +gg = zeros(2,2); +G = zeros(2,2); +c = 0 ; +s = 0 ; +ton = 0 ; +toff = 0 ; +theta = 0 ; +Gain = 0 ; + +%% Joint diagonalization proper +if verbose, fprintf('jade -> Contrast optimization by joint diagonalization\n'); end + +while encore, encore=0; + + if verbose, fprintf('jade -> Sweep #%3d',sweep); end + sweep = sweep+1; + upds = 0 ; + Vkeep = V ; + + for p=1:m-1, + for q=p+1:m, + + Ip = p:m:m*nbcm ; + Iq = q:m:m*nbcm ; + + %%% computation of Givens angle + g = [ CM(p,Ip)-CM(q,Iq) ; CM(p,Iq)+CM(q,Ip) ]; + gg = g*g'; + ton = gg(1,1)-gg(2,2); + toff = gg(1,2)+gg(2,1); + theta = 0.5*atan2( toff , ton+sqrt(ton*ton+toff*toff) ); + Gain = (sqrt(ton*ton+toff*toff) - ton) / 4 ; + + %%% Givens update + if abs(theta) > seuil, +%% if Gain > 1.0e-3*On/m/m , + encore = 1 ; + upds = upds + 1; + c = cos(theta); + s = sin(theta); + G = [ c -s ; s c ] ; + + pair = [p;q] ; + V(:,pair) = V(:,pair)*G ; + CM(pair,:) = G' * CM(pair,:) ; + CM(:,[Ip Iq]) = [ c*CM(:,Ip)+s*CM(:,Iq) -s*CM(:,Ip)+c*CM(:,Iq) ] ; + + + On = On + Gain; + Off = Off - Gain; + + %% fprintf('jade -> %3d %3d %12.8f\n',p,q,Off/On); + end%%of the if + end%%of the loop on q + end%%of the loop on p + if verbose, fprintf(' completed in %d rotations\n',upds); end + updates = updates + upds ; + +end%%of the while loop +if verbose, fprintf('jade -> Total of %d Givens rotations\n',updates); end + + +%%% A separating matrix +% =================== +B = V'*B ; + + +%%% Permut the rows of the separating matrix B to get the most energetic components first. +%%% Here the **signals** are normalized to unit variance. Therefore, the sort is +%%% according to the norm of the columns of A = pinv(B) + +% if verbose, fprintf('jade -> Sorting the components\n',updates); end +% A = pinv(B) ; +% [Ds,keys] = sort(sum(A.*A)) ; +% keys +% B = B(keys,:) ; +% B = B(m:-1:1,:) ; % Is this smart ? + + +% Signs are fixed by forcing the first column of B to have non-negative entries. + +if verbose, fprintf('jade -> Fixing the signs\n',updates); end +b = B(:,1) ; +signs = sign(sign(b)+0.1) ; % just a trick to deal with sign=0 +B = diag(signs)*B ; + + + +return ; + + +% To do. +% - Implement a cheaper/simpler whitening (is it worth it?) +% +% Revision history: +% +%- V1.8, May 2005 +% - Added some commented code to explain the cumulant computation tricks. +% - Added reference to the Neural Comp. paper. +% +%- V1.7, Nov. 16, 2002 +% - Reverted the mean removal code to an earlier version (not using +% repmat) to keep the code octave-compatible. Now less efficient, +% but does not make any significant difference wrt the total +% computing cost. +% - Remove some cruft (some debugging figures were created. What +% was this stuff doing there???) +% +% +%- V1.6, Feb. 24, 1997 +% - Mean removal is better implemented. 
+% - Transposing X before computing the cumulants: small speed-up +% - Still more comments to emphasize the relationship to PCA +% +%- V1.5, Dec. 24 1997 +% - The sign of each row of B is determined by letting the first element be positive. +% +%- V1.4, Dec. 23 1997 +% - Minor clean up. +% - Added a verbose switch +% - Added the sorting of the rows of B in order to fix in some reasonable way the +% permutation indetermination. See note 2) below. +% +%- V1.3, Nov. 2 1997 +% - Some clean up. Released in the public domain. +% +%- V1.2, Oct. 5 1997 +% - Changed random picking of the cumulant matrix used for initialization to a +% deterministic choice. This is not because of a better rationale but to make the +% ouput (almost surely) deterministic. +% - Rewrote the joint diag. to take more advantage of Matlab's tricks. +% - Created more dummy variables to combat Matlab's loose memory management. +% +%- V1.1, Oct. 29 1997. +% Made the estimation of the cumulant matrices more regular. This also corrects a +% buglet... +% +%- V1.0, Sept. 9 1997. Created. +% +% Main references: +% @article{CS-iee-94, +% title = "Blind beamforming for non {G}aussian signals", +% author = "Jean-Fran\c{c}ois Cardoso and Antoine Souloumiac", +% HTML = "ftp://sig.enst.fr/pub/jfc/Papers/iee.ps.gz", +% journal = "IEE Proceedings-F", +% month = dec, number = 6, pages = {362-370}, volume = 140, year = 1993} +% +% +%@article{JADE:NC, +% author = "Jean-Fran\c{c}ois Cardoso", +% journal = "Neural Computation", +% title = "High-order contrasts for independent component analysis", +% HTML = "http://www.tsi.enst.fr/~cardoso/Papers.PS/neuralcomp_2ppf.ps", +% year = 1999, month = jan, volume = 11, number = 1, pages = "157-192"} +% +% +% +% +% Notes: +% ====== +% +% Note 1) The original Jade algorithm/code deals with complex signals in Gaussian noise +% white and exploits an underlying assumption that the model of independent components +% actually holds. This is a reasonable assumption when dealing with some narrowband +% signals. In this context, one may i) seriously consider dealing precisely with the +% noise in the whitening process and ii) expect to use the small number of significant +% eigenmatrices to efficiently summarize all the 4th-order information. All this is done +% in the JADE algorithm. +% +% In *this* implementation, we deal with real-valued signals and we do NOT expect the ICA +% model to hold exactly. Therefore, it is pointless to try to deal precisely with the +% additive noise and it is very unlikely that the cumulant tensor can be accurately +% summarized by its first n eigen-matrices. Therefore, we consider the joint +% diagonalization of the *whole* set of eigen-matrices. However, in such a case, it is +% not necessary to compute the eigenmatrices at all because one may equivalently use +% `parallel slices' of the cumulant tensor. This part (computing the eigen-matrices) of +% the computation can be saved: it suffices to jointly diagonalize a set of cumulant +% matrices. Also, since we are dealing with reals signals, it becomes easier to exploit +% the symmetries of the cumulants to further reduce the number of matrices to be +% diagonalized. These considerations, together with other cheap tricks lead to this +% version of JADE which is optimized (again) to deal with real mixtures and to work +% `outside the model'. As the original JADE algorithm, it works by minimizing a `good +% set' of cumulants. 
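+%
+% (Minimal usage sketch, following the call form mentioned in Note 3 below:
+% B = jadeR(X,m) estimates a separating matrix from the n x T data matrix X
+% (n sensors, T samples), and S = B*X then holds the m recovered source
+% estimates; m is chosen by the user and must not exceed n.)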
+% +% +% Note 2) The rows of the separating matrix B are resorted in such a way that the columns +% of the corresponding mixing matrix A=pinv(B) are in decreasing order of (Euclidian) +% norm. This is a simple, `almost canonical' way of fixing the indetermination of +% permutation. It has the effect that the first rows of the recovered signals (ie the +% first rows of B*X) correspond to the most energetic *components*. Recall however that +% the source signals in S=B*X have unit variance. Therefore, when we say that the +% observations are unmixed in order of decreasing energy, this energetic signature is to +% be found as the norm of the columns of A=pinv(B) and not as the variances of the +% separated source signals. +% +% +% Note 3) In experiments where JADE is run as B=jadeR(X,m) with m varying in range of +% values, it is nice to be able to test the stability of the decomposition. In order to +% help in such a test, the rows of B can be sorted as described above. We have also +% decided to fix the sign of each row in some arbitrary but fixed way. The convention is +% that the first element of each row of B is positive. +% +% +% Note 4) Contrary to many other ICA algorithms, JADE (or least this version) does not +% operate on the data themselves but on a statistic (the full set of 4th order cumulant). +% This is represented by the matrix CM below, whose size grows as m^2 x m^2 where m is +% the number of sources to be extracted (m could be much smaller than n). As a +% consequence, (this version of) JADE will probably choke on a `large' number of sources. +% Here `large' depends mainly on the available memory and could be something like 40 or +% so. One of these days, I will prepare a version of JADE taking the `data' option +% rather than the `statistic' option. + + +% JadeR.m ends here. diff --git a/jbtest.m b/jbtest.m new file mode 100644 index 0000000..2154c01 --- /dev/null +++ b/jbtest.m @@ -0,0 +1,32 @@ +function [R, S, K] = jbtest(F, p); + +nts = size(F, 1); + +chi_qntls = [0.005 0.01; + 0.010 0.02; + 0.025 0.05; + 0.050 0.10; + 0.100 0.21; + 0.500 1.39; + 0.900 4.61; + 0.950 5.99; + 0.975 7.38; + 0.990 9.21; + 0.995 10.60]; + +qntl = chi_qntls(find(chi_qntls(:, 1) == p), 2); + +if isempty(qntl), error('Unknown propability!'), end + +F = F - ones(nts, 1)*mean(F); + +S = ((1/nts)*sum(F.^3))./(1/nts*sum(F.^2)).^(3/2); +K = ((1/nts)*sum(F.^4))./(1/nts*sum(F.^2)).^2; + +JB = nts/6*(S.^2 + ((K-3).^2)./4); + +R = zeros(size(S)); + +R(JB < qntl) = 1; + + diff --git a/kalman_falling_body.m b/kalman_falling_body.m new file mode 100644 index 0000000..1026049 --- /dev/null +++ b/kalman_falling_body.m @@ -0,0 +1,167 @@ +function [] = adjustment_theory_example + + +% Parameter der Trajektorie +g = 9.81; % Erdanziehung +x(1) = 0; % Startwert für x-Richtung +y(1) = 0; % Startwert für y-Richtung +vy(1) = 10; % Startgeschw. in x-Richtung +vx = 10; % Startgeschw. in y-Richtung +dt = 0.05; % Zeitintervall + + +% Berechnung der Referenztrajektorie +for i = 1:40 +x(i+1,1) = x(i) + vx*dt; +vy(i+1,1) = vy(i) - g*dt; +y(i+1,1) = y(i) + vy(i)*dt - 1/2*g*dt^2; +end + +% Zum Vergleich: Analytische Bestimmung der Kurve +y_ana = y(1) + vy(1)/vx*x - (g/(2*vx^2))*x.^2; + +% Bestimmung von Pseudo-Beobachtungen +L = y + randn(length(y),1)/2; + +% Kovarianzmatrix der Unbekannten +% 1. 
Annahme: alle Beobachtungen gleich genau -> Diagonalmatrix +P = eye(length(L), length(L)); + +% Vektor mit Näherungswerten für die Unbekannten +x0 = [0; 9; 11]; +xht = x0; + + +% Berechnung des Vektors der Näherungswerte der Messwerte +% ("Näherungsbeobachtungen") +L_approx = xht(1) + xht(2)/xht(3)*x - g/(2*xht(3)^2)*x.^2; +L_0 = L_approx; + +% Berechnung des Absolutgliedvektors +l = L - L_0; + + +% Festlegung von Schleifenvariablen +crit = 1; +k = 1; + + +while crit == 1 + + % Aufstellen der A-matrix mit a_ij = (dfi/dXi)|x_0 + A(:,1) = ones(size(L,1),1); % Ableitung nach y_0 + A(:,2) = x'./xht(2); % Ableitung nach v_0y + A(:,3) = -xht(2)*x'/xht(3)^2 + g/xht(3)^3*x'.^2; % Ableitung nach v_0x + + % Berechnugn des Vektors der Unbekannten (Nicht-linear -> lediglich + % Zuschläge!!!) + dx = inv(A'P*A)*A'P*l; + + % Berechnung der Verbesserungen + v = A*dx - l; + + % Kofaktorenmatrix der ausgegl. Parameter + Qxx = A'*P*A; + + % Berechnung der endgültigen Parameter: Näherunswerte + Zuschläge + xht = xht + dx; + + % Vektor der ausgeglichenen Messwerte (in der nächsten Iteration -> + % Näherungsbeobachtungen) + L_0 = A*dx + L_0; + + % Berechnung des Absolutgliedvektors für die nächste Iteration + l = L - L_0; + + % Ãœberprüfen des Abbruchkriteriums (falls Zuschläge klein genug sind) + if norm(dx) < 1e-10 + crit = 0; + end + + % Falls Lösungen nicht konvergieren (z.B. schlecht gewählte + % Näherungswerte) -> Schleifenabbruch + k = k + 1 + if k == 100 + crit = 0; + sprintf('Lösung konvergiert nicht!') + end + +end +keyboard + + + + + + +% function [x_ht, x_p, P] = kalman_falling_body(true_vals, obs, x0, P0, Q, R) +% +% g = 9.81; +% +% +% +% A = [1 dt 0 0 ; +% 0 1 0 0 ; +% 0 0 1 dt; +% 0 0 0 1]; +% +% B = [0; 0; dt^2/2; dt]; +% u = -g; +% +% H = [0 0 1 0]; + + + + +% plot(true_vals, 'k', 'linewidth', 1.5); +% hold on +% plot(obs, 'b+', 'linewidth', 1.5); +% +% +% +% +% A = [1 1; 0 1]; +% B = [0.5; 1]; +% H = [1 0]; +% +% u = -9.81; +% +% % initial values: +% x_ht(:,1) = x0; +% +% +% +% for i = 1:length(obs) +% +% % Prediction step +% if i == 1 +% x_pred = A*x0 + B*u; +% P_pred = A*P0*A' + Q; +% else +% x_pred = A*x_ht(:,i-1) + B*u; % State prediction +% P_pred = A*P*A' + Q; % Covariance prediction +% end +% +% x_p(:,i) = x_pred; +% +% % Corrector step +% v = obs(i) - H*x_pred; % Innovation +% +% if length(R) > 1 % Innovation covariance +% S = H*P_pred*H' + R(i); % constant obs. error +% else % Innovation covariance +% S = H*P_pred*H' + R; % changing obs. 
error +% end +% +% K = P_pred*H'*inv(S); % Kalman Gain +% x_ht(:,i) = x_pred + K*v; % Update state +% P = ([1 0; 0 1] - K*H)*P_pred; % Update covariance +% +% +% end +% +% % keyboard + + + + diff --git a/kendallstau.m b/kendallstau.m new file mode 100644 index 0000000..63ef550 --- /dev/null +++ b/kendallstau.m @@ -0,0 +1,53 @@ +function tau = matrix_corr(inpt, type) + + + + + +% keyboard + +if strcmp(type, 'kendall') + + [in1_srt, indx] = sort(inpt, 2, 'descend'); + + for i = 1:size(inpt,1) + tmp = tiedrank(inpt(i,:)); + ranks(i,:) = -tmp + max(tmp) + 1; + end + tau = zeros(size(inpt,1), size(inpt,1)); + n = size(inpt,2); + + for i = 1:size(ranks,1) + tmp = ranks(i:end, indx(i,:)); + Q = zeros(size(tmp,1), 1); + q = zeros(size(tmp)); + + for j = 1:size(tmp,1) + for k = 1:size(tmp,2) + tmp2 = zeros(1, length(tmp(j, k+1:end))); + tmp2(tmp(j, k+1:end) <= tmp(j, k)) = 1; + q(j, k) = sum(tmp2); + end + end + Q = sum(q,2); + tau(i, i:end) = 1-4*Q'/(n*(n-1)); + end + +elseif strcmp(type, 'spearman') + +elseif strcmp(type, 'pearson') + A = inpt - mean(inpt, 2)*ones(1, size(inpt,2)); + std = sum(A.*A,2) + num = A*A'; + den = std*std'; + tau = num./den; + +end + + +% tau = tau + tau' - +% % +% [in2_srt, indx2] = sort(indx, 'ascend'); + + + diff --git a/kstest_matlab.m b/kstest_matlab.m new file mode 100644 index 0000000..5b9e00b --- /dev/null +++ b/kstest_matlab.m @@ -0,0 +1,178 @@ +function [H, pValue, KSstatistic] = kstest_matlab(x1, x2, alpha, tail) +%KSTEST2 Two-sample Kolmogorov-Smirnov goodness-of-fit hypothesis test. +% H = KSTEST2(X1,X2,ALPHA,TYPE) performs a Kolmogorov-Smirnov (K-S) test +% to determine if independent random samples, X1 and X2, are drawn from +% the same underlying continuous population. ALPHA and TYPE are optional +% scalar inputs: ALPHA is the desired significance level (default = 0.05); +% TYPE indicates the type of test (default = 'unequal'). H indicates the +% result of the hypothesis test: +% H = 0 => Do not reject the null hypothesis at significance level ALPHA. +% H = 1 => Reject the null hypothesis at significance level ALPHA. +% +% Let S1(x) and S2(x) be the empirical distribution functions from the +% sample vectors X1 and X2, respectively, and F1(x) and F2(x) be the +% corresponding true (but unknown) population CDFs. The two-sample K-S +% test tests the null hypothesis that F1(x) = F2(x) for all x, against the +% alternative specified by TYPE: +% 'unequal' -- "F1(x) not equal to F2(x)" (two-sided test) +% 'larger' -- "F1(x) > F2(x)" (one-sided test) +% 'smaller' -- "F1(x) < F2(x)" (one-sided test) +% +% For TYPE = 'unequal', 'larger', and 'smaller', the test statistics are +% max|S1(x) - S2(x)|, max[S1(x) - S2(x)], and max[S2(x) - S1(x)], +% respectively. +% +% The decision to reject the null hypothesis occurs when the significance +% level, ALPHA, equals or exceeds the P-value. +% +% X1 and X2 are vectors of lengths N1 and N2, respectively, and represent +% random samples from some underlying distribution(s). Missing +% observations, indicated by NaNs (Not-a-Number), are ignored. +% +% [H,P] = KSTEST2(...) also returns the asymptotic P-value P. +% +% [H,P,KSSTAT] = KSTEST2(...) also returns the K-S test statistic KSSTAT +% defined above for the test type indicated by TYPE. +% +% The asymptotic P-value becomes very accurate for large sample sizes, and +% is believed to be reasonably accurate for sample sizes N1 and N2 such +% that (N1*N2)/(N1 + N2) >= 4. +% +% See also KSTEST, LILLIETEST, CDFPLOT. +% + +% Copyright 1993-2011 The MathWorks, Inc. 
+% $Revision: 1.1.8.3 $ $ Date: 1998/01/30 13:45:34 $ + +% References: +% Massey, F.J., (1951) "The Kolmogorov-Smirnov Test for Goodness of Fit", +% Journal of the American Statistical Association, 46(253):68-78. +% Miller, L.H., (1956) "Table of Percentage Points of Kolmogorov Statistics", +% Journal of the American Statistical Association, 51(273):111-121. +% Stephens, M.A., (1970) "Use of the Kolmogorov-Smirnov, Cramer-Von Mises and +% Related Statistics Without Extensive Tables", Journal of the Royal +% Statistical Society. Series B, 32(1):115-122. +% Conover, W.J., (1980) Practical Nonparametric Statistics, Wiley. +% Press, W.H., et. al., (1992) Numerical Recipes in C, Cambridge Univ. Press. + +if nargin < 2 + error(message('stats:kstest2:TooFewInputs')); +end + +% +% Ensure each sample is a VECTOR. +% + +if ~isvector(x1) || ~isvector(x2) + error(message('stats:kstest2:VectorRequired')); +end + +% +% Remove missing observations indicated by NaN's, and +% ensure that valid observations remain. +% + +x1 = x1(~isnan(x1)); +x2 = x2(~isnan(x2)); +x1 = x1(:); +x2 = x2(:); + +if isempty(x1) + error(message('stats:kstest2:NotEnoughData', 'X1')); +end + +if isempty(x2) + error(message('stats:kstest2:NotEnoughData', 'X2')); +end + +% +% Ensure the significance level, ALPHA, is a scalar +% between 0 and 1 and set default if necessary. +% + +if (nargin >= 3) && ~isempty(alpha) + if ~isscalar(alpha) || (alpha <= 0 || alpha >= 1) + error(message('stats:kstest2:BadAlpha')); + end +else + alpha = 0.05; +end + +% +% Ensure the type-of-test indicator, TYPE, is a scalar integer from +% the allowable set, and set default if necessary. +% + +if (nargin >= 4) && ~isempty(tail) + if ischar(tail) + try + [~,tail] = internal.stats.getParamVal(tail, ... + {'smaller','unequal','larger'},'Type'); + catch + error(message('stats:kstest2:BadTail')); + end + tail = tail - 2; + elseif ~isscalar(tail) || ~((tail==-1) || (tail==0) || (tail==1)) + error(message('stats:kstest2:BadTail')); + end +else + tail = 0; +end + +% +% Calculate F1(x) and F2(x), the empirical (i.e., sample) CDFs. +% + +binEdges = [-inf ; sort([x1;x2]) ; inf]; + +binCounts1 = histc (x1 , binEdges, 1); +binCounts2 = histc (x2 , binEdges, 1); + +sumCounts1 = cumsum(binCounts1)./sum(binCounts1); +sumCounts2 = cumsum(binCounts2)./sum(binCounts2); + +sampleCDF1 = sumCounts1(1:end-1); +sampleCDF2 = sumCounts2(1:end-1); +keyboard +% +% Compute the test statistic of interest. +% + +switch tail + case 0 % 2-sided test: T = max|F1(x) - F2(x)|. + deltaCDF = abs(sampleCDF1 - sampleCDF2); + + case -1 % 1-sided test: T = max[F2(x) - F1(x)]. + deltaCDF = sampleCDF2 - sampleCDF1; + + case 1 % 1-sided test: T = max[F1(x) - F2(x)]. + deltaCDF = sampleCDF1 - sampleCDF2; +end + +KSstatistic = max(deltaCDF); + +% +% Compute the asymptotic P-value approximation and accept or +% reject the null hypothesis on the basis of the P-value. +% + +n1 = length(x1); +n2 = length(x2); +n = n1 * n2 /(n1 + n2); +lambda = max((sqrt(n) + 0.12 + 0.11/sqrt(n)) * KSstatistic , 0); + +if tail ~= 0 % 1-sided test. + + pValue = exp(-2 * lambda * lambda); + +else % 2-sided test (default). +% +% Use the asymptotic Q-function to approximate the 2-sided P-value. 
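+% (This evaluates the Kolmogorov asymptotic series
+%    Q_KS(lambda) = 2 * sum_{j>=1} (-1)^(j-1) * exp(-2*j^2*lambda^2),
+% truncated after 101 terms below, and clips the result to [0,1].)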
+% + j = (1:101)'; + pValue = 2 * sum((-1).^(j-1).*exp(-2*lambda*lambda*j.^2)); + pValue = min(max(pValue, 0), 1); + +end + +H = (alpha >= pValue); diff --git a/lagged_matrix.m b/lagged_matrix.m new file mode 100644 index 0000000..6b691f8 --- /dev/null +++ b/lagged_matrix.m @@ -0,0 +1,28 @@ +function F_lag = lagged_matrix(F, lags, method) + +[nts, npts] = size(F); +nr_lags = length(lags); + + +F_lag = zeros(nts - max(lags), nr_lags*npts); + +if method == 1 + for i = 1:nr_lags + indx = (lags(i)+1:nts-max(lags)+lags(i))'; + F_lag(:, (i-1)*npts + 1 : i*npts) = F(indx, :); + end +elseif method == 2 + keyboard + for i = 1:nr_lags + + indx(:, i) = (lags(i)+1:nts-max(lags)+lags(i))'; + F_lag(:, (i-1)*npts + 1 : i*npts) = F(indx, :); + end + + end +end + + + + + \ No newline at end of file diff --git a/latmn.m b/latmn.m new file mode 100644 index 0000000..0292cc1 --- /dev/null +++ b/latmn.m @@ -0,0 +1,13 @@ +function Sig_lat = latmn(inpt, mval) + +if ~isnan(mval) + for i = 1:length(inpt) + inpt{i}(inpt{i} == mval) = NaN; + end +end + +for i = 1:length(inpt) + tmp = nanmean(inpt{i}, 2); + tmp(isnan(tmp)) = []; + Sig_lat(:,i) = tmp; +end \ No newline at end of file diff --git a/ldlt.m b/ldlt.m new file mode 100644 index 0000000..0fd92d5 --- /dev/null +++ b/ldlt.m @@ -0,0 +1,49 @@ +% +% [L,D]=ldlt(A) +% +% This function computes the square root free Cholesky factorization +% +% A=L*D*L' +% +% where L is a lower triangular matrix with ones on the diagonal, and D +% is a diagonal matrix. +% +% It is assumed that A is symmetric and postive definite. +% +% Reference: Golub and Van Loan, "Matrix Computations", second edition, +% p 137. +% Author: Brian Borchers (borchers@nmt.edu) +% +function [L,D]=ldlt(A) +% +% Figure out the size of A. +% +n=size(A,1); +% +% The main loop. See Golub and Van Loan for details. +% +L=zeros(n,n); +for j=1:n, + if (j > 1), + v(1:j-1)=L(j,1:j-1).*d(1:j-1); + v(j)=A(j,j)-L(j,1:j-1)*v(1:j-1)'; + d(j)=v(j); + if (j < n), + L(j+1:n,j)=(A(j+1:n,j)-L(j+1:n,1:j-1)*v(1:j-1)')/v(j); + end; + else + v(1)=A(1,1); + d(1)=v(1); + L(2:n,1)=A(2:n,1)/v(1); + end; +end; +% +% Put d into a matrix. +% +D=diag(d); +% +% Put ones on the diagonal of L. 
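+% (Worked check, as a sketch: A = [4 2; 2 3] yields L = [1 0; 0.5 1] and
+% D = diag([4 2]), so that L*D*L' reproduces A.)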
+% +L=L+eye(n); + + diff --git a/lm_sort.m b/lm_sort.m new file mode 100755 index 0000000..39e901b --- /dev/null +++ b/lm_sort.m @@ -0,0 +1,69 @@ +function Anew = lm_sort(Aold, Lmax, sor) + +% This function changes the arrangement of spherical harmonic coefficients +% from degree-wise to Colombo (order-wise) and vice versa +%-------------------------------------------------------------------------- +% Input: Aold [n x 1] vector with spherical harmonic +% coefficients +% Lmax [1 x 1] maximal degree +% +% sor [string] defines the arrangement of the output +% coefficients +% 'l2m' -> Colombo +% 'm2l' -> degree-wise +% Output: Anew [n x 1] vector with spherical harmonic +% coefficients +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: September 2008 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +if size(Aold) == [1 length(Aold)] + Aold = Aold'; +end + +if strcmp(sor, 'l2m') + + Anew = zeros(size(Aold)); + start(1) = 1; + for i = 2:Lmax+1 + start(i) = start(i-1) + i; + end + + index1 = 1:Lmax+1; + index2 = 1:-1:-Lmax+2; + index3 = 0:-1:-Lmax+1; + + K = 1; + H = length(start); + + for i = 1:length(start) + k(1) = start(i); + for j = index1(i):Lmax + k(j+index2(i)) = k(j+index3(i))+j; + end + + Anew(K:H) = Aold(k); + K = K + length(start)+1-i; + H = H + length(start)-i; + clear k; + end + +elseif strcmp(sor, 'm2l') + + add = zeros(Lmax+1, 1); + add(2:end) = Lmax:-1:1; + Anew = []; + add_(1) = add(1); + for i = 2:length(add) + add_(i) = add_(i-1)+add(i); + end + + for l = 1:Lmax+1 + m = 1:l; + Anew = [Anew; Aold(l+add_(1,m),1)]; + end + +end diff --git a/load_data.m b/load_data.m new file mode 100644 index 0000000..f8c1189 --- /dev/null +++ b/load_data.m @@ -0,0 +1,42 @@ +function fields = load_data(dtaset, syr, eyr, smnth, emnth); + +if nargin < 6, emnth = 12; end +if nargin < 5, smnth = 1; end +if nargin < 4, eyr = 1; end +if nargin < 3, syr = 0; end + +dtafld = '/media/storage/Data/'; + +if strcmp(dtaset, 'gpcc_prec_6') + fnme = [dtafld, 'Precipitation/GPCC/GPCC_PRECv6.0.mat']; +elseif strcmp(dtaset, 'gpcc_prec_5') + fnme = [dtafld, 'Precipitation/GPCC/GPCC_PRECv5.0.mat']; +elseif strcmp(dtaset, 'gpcc_prec_4') + fnme = [dtafld, 'Precipitation/GPCC/GPCC_PRECv4.0.mat']; +elseif strcmp(dtaset, 'erai_prec') + fnme = [dtafld, 'Precipitation/ECMWF/ECMWF_PREC.mat']; +end + + +tmp = importdata(fnme); + +% Tweak to decide weather the date information is stored in columns 3 & 4 +% or 4 & 5 +mnth1 = cell2mat(tmp(:,3)); +if mnth1(13) = 1 + mnths = cell2mat(tmp(:,3)); + yrs = cell2mat(tmp(:,4)); + findx = 8; +else + mnths = cell2mat(tmp(:,4)); + yrs = cell2mat(tmp(:,5)); + findx = 9; +end + +sindx = find(mnths == smnth & yrs = syr); +eindx = find(mnths == emnth & yrs = eyr); + +fields = dtaset(sindx:eindx, findx); + + + \ No newline at end of file diff --git a/lsft.m b/lsft.m new file mode 100644 index 0000000..bccb0e3 --- /dev/null +++ b/lsft.m @@ -0,0 +1,67 @@ +function [a b] = lsft(y,t,T); +% The function computes a least-squares fit to estimate the a and b +% coefficients of a fourier series. It should be used when the data has +% gaps, i.e. when a normal fft can not be performed. +% Note that the first coefficient a_0 is not estimated, i.e. the mean +% should be already removed from the data. 
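+%
+% (In matrix form the routine solves the linear least-squares problem
+%    y ~ A*x,   A = [cos(t*w_n) sin(t*w_n)],   w_n = n*2*pi/T,   n = 1..T/2,
+% via x_hat = inv(A'*A)*A'*y; the outputs a and b are the cosine and sine
+% coefficients, respectively.)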
+%-------------------------------------------------------------------------- +% Input (mandatory): +% - y [t x 1] Vector containing the data +% +% Input (optional): +% - t [t x 1] Vector containing the time-steps (i.e. the sampling +% times) of the data vector y. +% - T scalar Period of the lowest frequency (i.e. the fundamental +% frequency). In general, T equals the length of the +% GAP-FREE time-series +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: January 201 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + + + +if nargin < 4 + showfit = true; +end + +if nargin < 3 + T = length(y); + if nargin < 2 + t = (0:T-1)'; + else + if size(t,1) ~= 1 + t = t'; + end + end +end + +% Fundamental frequency +omega_0 = 2*pi/T; + +n = 1:T/2; +N = max(n); + +omega_n = n*omega_0; + +A = [cos(t*omega_n) sin(t*omega_n)]; +x_hat = inv(A'*A)*A'*y; + +a = x_hat(1:N); +b = x_hat(N+1:end); + +if showfit == true + figure + t_nw = 0:1:T-1; + + plot(t, y) + hold on + plot(t_nw, A*x_hat, 'r') +end + + + + + diff --git a/make_movie.m b/make_movie.m new file mode 100644 index 0000000..0e54bf3 --- /dev/null +++ b/make_movie.m @@ -0,0 +1,60 @@ +function F = make_movie(data, tles, out_name, lims, lam, th, cxis, aspct) + + + +load coast +load cmap_grace.mat + +aviobj = avifile(out_name, 'compression', 'none', 'fps', 3, 'quality', 100, 'colormap', cmap_grace); + +% 'units', 'normalized', +hf=figure('visible', 'on'); +set(hf, 'units', 'pixels'); +% rect = get(hf,'Position') +% % keyboard +% rect(1:2) = [0 0]; +hax = axes; + +set(hf, 'DoubleBuffer', 'on'); +h = imagesc(lam, th, data{1}, 'Parent', hax, 'EraseMode', 'xor'); +hold on +plot(long, lat, 'k', 'linewidth', 2, 'Parent', hax); + +grid on +caxis(cxis); +axis(lims); +pbaspect(aspct); +axis xy +colormap(cmap_grace) +g = colorbar('eastoutside'); +set(get(g, 'ylabel'), 'String', '[mm/month]', 'fontsize', 20); +set(hax, 'xtick', -180:30:180, 'fontsize', 20); +set(hax, 'ytick', -90:30:90); + +title(tles{1}, 'fontsize', 25); +pause + +rect = get(hf,'Position') +rect(1:2) = [0 0]; + +F = getframe(hf, rect); +aviobj = addframe(aviobj, F); + + +for i = 2:length(data) + set(h, 'CData', data{i}, 'Parent', hax); + title(tles{i}, 'fontsize', 15); + + F = getframe(hf,rect); + aviobj = addframe(aviobj, F); + +end + +close(hf); +aviobj = close(aviobj); + + + + + + diff --git a/mat2netcdf.m b/mat2netcdf.m new file mode 100644 index 0000000..8776d0b --- /dev/null +++ b/mat2netcdf.m @@ -0,0 +1,39 @@ +function [] = mat2netcdf(mtrx, fname, varnme, theta, lambda, mval, unit) + +ncid = netcdf.create(fname, 'CLOBBER'); +lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lambda)); +lat_dim_id = netcdf.defDim(ncid, 'latitude' , length(theta)); + +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); +data_var_id = netcdf.defVar(ncid, varnme, 'double', [lon_dim_id lat_dim_id]); + +netcdf.endDef(ncid) +netcdf.putVar(ncid, lon_var_id, lambda); +netcdf.putVar(ncid, lat_var_id, theta); +netcdf.putVar(ncid, data_var_id, mtrx'); + +netcdf.reDef(ncid) +netcdf.putAtt(ncid, lon_var_id, 'units', 'degrees_east'); +netcdf.putAtt(ncid, lon_var_id, 'long_name', 'longitude coordinate'); +netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', 'Lon'); + +netcdf.putAtt(ncid, lat_var_id, 'units', 'degrees_north'); +netcdf.putAtt(ncid, lat_var_id, 
'long_name', 'latitude coordinate'); +netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', 'Lat'); + +netcdf.putAtt(ncid, data_var_id, '_CoordinateAxes', 'Lon Lat'); + + + + + + + +% +% netcdf.reDef(ncid) + + + + + \ No newline at end of file diff --git a/mat2vec.m b/mat2vec.m new file mode 100755 index 0000000..8f97b4e --- /dev/null +++ b/mat2vec.m @@ -0,0 +1,40 @@ +function Q_ = mat2vec(Q) + +% mat2vec.m transforms a matrix in the c\s or s|c -format into a Colombo +% ordered vector +%-------------------------------------------------------------------------- +% Input: Q [n x n] matrix in c\s format +% +% +% Output: Q_ [n^2 x 1] Colombo ordered vector +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 8. Sep. 08 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- +% Updates: - 11.10.2011: Removed the for-loops, added support for +% s|c-format (CL) + +[r, c] = size(Q); +lmx = r - 1; + +Qclm = []; +Qslm = []; + +if r == c % Input dataset is in c\s-format + + Qclm = tril(Q, 0) + triu(ones(r,c),1)*-9999; + Qslm = (triu(Q, 1) + tril(ones(r,c),0)*-9999)'; + +elseif r == (c-1)/2 + 1 % Input dataset is in s|c-format + + Qclm = tril(Q(:, lmx+1:end), 0) + triu(ones(r,lmx+1),1)*-9999; + Qslm = tril(fliplr(Q(:, 1:lmx)), -1) + triu(ones(r,lmx),0)*-9999; + +end + + +Q_ = [Qclm(Qclm ~= -9999); Qslm(Qslm ~= -9999)]; + + diff --git a/matrix_corr.m b/matrix_corr.m new file mode 100644 index 0000000..1912d80 --- /dev/null +++ b/matrix_corr.m @@ -0,0 +1,92 @@ +function corr = matrix_corr(inpt, type) + +% The function computes a correlation-matrix depending on the input matrix +% inpt. The rows of this matrix are defined by different pixels while the +% column dimension corresponds to the time-steps according to the following +% example: +% inpt = [p1(t1) p1(t2) ... p1(tm); +% p2(t1) p2(t2) ... ; +% . +% . +% . +% pn(t1) pn(t2) ... pn(tm)]; +% where row i with i=1,...,n corresponds to the pixel i and column j +% represents the time-step j with j=1,...,m. +% The function allows the computation of the three widely used correlation +% coefficients according to Pearson (default), Spearman and Kendall. Due to +% their definition, the computation of the two rank correlation coefficients +% Searman's rho and Kendall's tau are computationally demanding. 
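+%
+% (Usage sketch: C = matrix_corr(X, 'pearson') returns the m x m matrix of
+% Pearson correlations between the rows of X; note that the 'kendall' branch
+% below fills only the upper triangle of the matrix and the 'spearman'
+% branch is not implemented yet.)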
+%-------------------------------------------------------------------------- +% Input: inpt [m x n] input matrix with m time-series +% containing n time-steps +% type string desired correlation coefficient: +% 'pearson' -> Pearson's r +% 'spearman' -> Spearman's rho +% 'kendall' -> Kendall's tau + +% Output: corr [m x m] Correlation matrix +%-------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: June 2011 +%-------------------------------------------------------------------------- +% Uses: tiedrank.m (for spearman and kendall, intrinsic function) +%-------------------------------------------------------------------------- + +if nargin < 2 + type = 'pearson'; +end + +if strcmp(type, 'pearson') + % Matrix with the deviations from the mean value in each pixel + A = inpt - mean(inpt, 2)*ones(1, size(inpt,2)); + % Squared standard deviations + std = sum(A.*A,2); + % Compute the numerator + num = A*A'; + % Compute the denominator + den = (std*std').^(1/2); + % Correlation is given by corr = cov(x,y)/(sqrt(var(x))*sqrt(var(y))) + corr = num./den; + +elseif strcmp(type, 'kendall') + + % Sort the input data in descending order and save the original indices + [in1_srt, indx] = sort(inpt, 2, 'descend'); + + % Compute the ranks of the input data + for i = 1:size(inpt,1) + tmp = tiedrank(inpt(i,:)); + % tiedrank.m computes the ranks in descending order but we need an + % ascending order + ranks(i,:) = -tmp + max(tmp) + 1; + end + + corr = zeros(size(inpt,1), size(inpt,1)); + n = size(inpt,2); + + for i = 1:size(ranks,1) + tmp = ranks(i:end, indx(i,:)); + Q = zeros(size(tmp,1), 1); + q = zeros(size(tmp)); + + for j = 1:size(tmp,1) + for k = 1:size(tmp,2) + tmp2 = zeros(1, length(tmp(j, k+1:end))); + tmp2(tmp(j, k+1:end) <= tmp(j, k)) = 1; + q(j, k) = sum(tmp2); + end + end + Q = sum(q,2); + corr(i, i:end) = 1-4*Q'/(n*(n-1)); + end + +elseif strcmp(type, 'spearman') + + +end + + + + + + diff --git a/matrixcorr.m b/matrixcorr.m new file mode 100644 index 0000000..848726a --- /dev/null +++ b/matrixcorr.m @@ -0,0 +1,14 @@ +function R = matrixcorr(ts1, ts2); + +[n, p] = size(ts1); + +mn1 = mean(ts1,1); +mn2 = mean(ts2,1); + +std1 = std(ts1,1); +std2 = std(ts2,1); + +for i = 1:p + R(1, i) = 1/(n-1)*((ts1(:,i) - mn1(i))'*(ts2(:,i) - mn2(i)))/(std1(i)*std2(i)); +end + diff --git a/mc_lags.m b/mc_lags.m new file mode 100644 index 0000000..253bfb3 --- /dev/null +++ b/mc_lags.m @@ -0,0 +1,23 @@ +function M = mc_lags(maxlag, nrit, usezero) + + +for i = 1:nrit + % Compute the number of lags (i.e. 
the window length) + nr_lags = randi([0 maxlag], 1); + + if nr_lags == 0 + M{i} = 0; + else + tmp = randi([0 maxlag], [1 nr_lags]); + tmp = sort(unique(tmp), 'ascend'); + if usezero == true + if tmp(1) ~= 0 + tmp = [0 tmp]; + end + end + M{i} = tmp; + end +end + + + diff --git a/mergets.m b/mergets.m new file mode 100644 index 0000000..f9283a5 --- /dev/null +++ b/mergets.m @@ -0,0 +1,66 @@ +function otpt = mergets(period, clms, datatype, varargin) + +if strcmp(datatype, 'full') + sind = 2; +else + sind = 1; +end + +syr_1 = -inf; +smnth_1 = -inf; +eyr_1 = inf; +emnth_1 = inf; + +for i = 1:length(varargin) + syr_2 = varargin{i}(sind, clms(2)); + smnth_2 = varargin{i}(sind, clms(1)); + eyr_2 = varargin{i}(end, clms(2)); + emnth_2 = varargin{i}(end, clms(1)); + + if syr_2 > syr_1 + syr_1 = syr_2; + smnth_1 = smnth_2; + elseif syr_2 == syr_1 + if smnth_2 > smnth_1 + smnth_1 = smnth_2; + end + end + + if eyr_2 < eyr_1 + eyr_1 = eyr_2; + emnth_1 = emnth_2; + elseif eyr_2 == eyr_1 + if emnth_2 < emnth_1 + emnth_1 = emnth_2; + end + end +end + + +for i = 1:length(varargin) + sindx = find(varargin{i}(:, clms(1)) == smnth_1 & ... + varargin{i}(:, clms(2)) == syr_1); + eindx = find(varargin{i}(:, clms(1)) == emnth_1 & ... + varargin{i}(:, clms(2)) == eyr_1); + if i == 1 + otpt(:, 1) = varargin{i}(sindx:eindx, clms(1)); + otpt(:, 2) = varargin{i}(sindx:eindx, clms(2)); + end + + otpt(:, i+2) = varargin{i}(sindx:eindx, clms(3)); +end + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mmmnth2mmday.m b/mmmnth2mmday.m new file mode 100644 index 0000000..10eb5fb --- /dev/null +++ b/mmmnth2mmday.m @@ -0,0 +1,85 @@ +function otpt = mmmnth2mmday(inpt, clms, mval, method, datatype); +% The function transforms the data in inpt from mm/month to mm/day and vice +% versa. + +%-------------------------------------------------------------------------- +% Input: inpt matrix/cell Input data +% clms vector Columns which contain the time +% information +% mval scalar Missing value identifier +% method 1,2 1: mm/month -> mm/day +% 2: mm/day -> mm/month +% datatype char Full dataset contains area indices +% in the first row. 
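+% Output: otpt matrix/cell Converted data, same layout as inpt
+% (missing values keep the mval identifier)
+%
+% (Usage sketch with the default column layout: for a numeric matrix M with
+% months in column 1, years in column 2 and data from column 4 onwards,
+% otpt = mmmnth2mmday(M, [1 2 4], -9999, 1) divides the data rows by the
+% number of days in the corresponding month, i.e. mm/month -> mm/day.)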
+%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2012 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +if nargin < 5, datatype = 'full'; end +if nargin < 4, method = 1; end +if nargin < 3, mval = -9999; end + +if nargin < 2 + if isnumeric(inpt) + clms = [1 2 4]; + elseif iscell(inpt) + clms = [3 4 8]; + end +end + + +otpt = inpt; + +if isnumeric(inpt) + + if strcmp(datatype, 'full') + sind = 2; + else + sind = 1; + end + + mnth = inpt(sind:end, clms(1)); + yr = inpt(sind:end, clms(2)); + nrd = eomday(yr, mnth); + + nrtsps = length(mnth); + nrtsrs = length(inpt(sind, clms(3):end)); + + + daymat = nrd*ones(1, nrtsrs); + + if method == 1 + otpt(sind:end, clms(3):end) = otpt(sind:end, clms(3):end)./daymat; + elseif method == 2 + otpt(sind:end, clms(3):end) = otpt(sind:end, clms(3):end).*daymat; + end + + otpt(inpt == mval) = mval; + + +elseif iscell(inpt) + + mnth = cell2mat(inpt(:, clms(1))); + yr = cell2mat(inpt(:, clms(2))); + nrd = eomday(yr, mnth); + + if method == 1 + for i = 1:length(mnth) + otpt{i, clms(3)} = inpt{i, clms(3)}./nrd(i); + otpt{i, clms(3)}(inpt{i, clms(3)} == mval) = mval; + end + elseif method == 2 + for i = 1:length(mnth) + otpt{i, clms(3)} = inpt{i, clms(3)}*nrd(i); + otpt{i, clms(3)}(inpt == mval) = mval; + end + end + +end + + + + diff --git a/mmstommmonth.m b/mmstommmonth.m new file mode 100644 index 0000000..3bb4acd --- /dev/null +++ b/mmstommmonth.m @@ -0,0 +1,23 @@ +function vimfd_mnth = mmstommmonth(vimfd_s) + +lyear = [2000 2004 2008 2012]; + +vimfd_mnth = vimfd_s; + +for i = 1:length(vimfd_s) + if vimfd_s{i,1} == 1 || vimfd_s{i,1} == 3 || vimfd_s{i,1} == 5 ... + || vimfd_s{i,1} == 7 || vimfd_s{i,1} == 8 ... + || vimfd_s{i,1} == 10 || vimfd_s{i,1} == 12 + days = 31; + elseif vimfd_s{i,1} == 4 || vimfd_s{i,1} == 6 || vimfd_s{i,1} == 9 ... 
+ || vimfd_s{i,1} == 11 + days = 30; + elseif vimfd_s{i,1} == 2 && max(vimfd_s{i,2} == lyear) == 0 + days = 29; + elseif vimfd_s{i,1} == 2 && max(vimfd_s{i,2} == lyear) == 1 + days = 28; + end + + vimfd_mnth{i,3} = vimfd_mnth{i,3}*3600*24*days; +end + \ No newline at end of file diff --git a/mnth_yr_vec.m b/mnth_yr_vec.m new file mode 100644 index 0000000..1e66370 --- /dev/null +++ b/mnth_yr_vec.m @@ -0,0 +1,29 @@ +function [mnths, yrs] = mnth_yr_vec(smnth, syr, emnth, eyr); + + + +if syr == eyr + nts = emnth - smnth + 1; +else + mnths_y1 = 12 - smnth + 1; + mnths_y2 = emnth; + + nts = mnths_y1 + 12*(eyr - (syr + 1)) + mnths_y2; +end + + +mnths(1, 1) = smnth; +yrs(1, 1) = syr; + +for i = 2:nts + mnths(i, 1) = mnths(i-1, 1) + 1; + + if mnths(i, 1) == 13 + mnths(i, 1) = 1; + yrs(i, 1) = yrs(i-1, 1) + 1; + else + yrs(i, 1) = yrs(i-1, 1); + end +end + + diff --git a/mnthnms.m b/mnthnms.m new file mode 100644 index 0000000..35b91c0 --- /dev/null +++ b/mnthnms.m @@ -0,0 +1,56 @@ +function mnthnms = mnthnms(stle) + + +if nargin < 1 + stle = 'short'; +end + +if strcmp(stle, 'long') + mnthnms{1,1} = 'January'; + mnthnms{2,1} = 'February'; + mnthnms{3,1} = 'March'; + mnthnms{4,1} = 'April'; + mnthnms{5,1} = 'May'; + mnthnms{6,1} = 'June'; + mnthnms{7,1} = 'July'; + mnthnms{8,1} = 'August'; + mnthnms{9,1} = 'September'; + mnthnms{10,1} = 'October'; + mnthnms{11,1} = 'November'; + mnthnms{12,1} = 'December'; +elseif strcmp(stle, 'short') + mnthnms{1,1} = 'Jan'; + mnthnms{2,1} = 'Feb'; + mnthnms{3,1} = 'Mar'; + mnthnms{4,1} = 'Apr'; + mnthnms{5,1} = 'May'; + mnthnms{6,1} = 'Jun'; + mnthnms{7,1} = 'Jul'; + mnthnms{8,1} = 'Aug'; + mnthnms{9,1} = 'Sep'; + mnthnms{10,1} = 'Oct'; + mnthnms{11,1} = 'Nov'; + mnthnms{12,1} = 'Dec'; +elseif strcmp(stle, 'vshort') + mnthnms{1,1} = 'J'; + mnthnms{2,1} = 'F'; + mnthnms{3,1} = 'M'; + mnthnms{4,1} = 'A'; + mnthnms{5,1} = 'M'; + mnthnms{6,1} = 'J'; + mnthnms{7,1} = 'J'; + mnthnms{8,1} = 'A'; + mnthnms{9,1} = 'S'; + mnthnms{10,1} = 'O'; + mnthnms{11,1} = 'N'; + mnthnms{12,1} = 'D'; +elseif strcmp(stle, 'ssnl') + mnthnms{1,1} = 'DJF'; + mnthnms{2,1} = 'MAM'; + mnthnms{3,1} = 'JJA'; + mnthnms{4,1} = 'SON'; +end + + + + \ No newline at end of file diff --git a/mod01.m b/mod01.m new file mode 100644 index 0000000..46fe2ad --- /dev/null +++ b/mod01.m @@ -0,0 +1,205 @@ +function K_ = mod01(K, tpe, coeff) + +% MOD01 removes or adds c00, c10, c11 and s11 coefficients from a +% given input K which can be a vector or a matrix with its elements being +% arranged orderwise. If c00, c10, c11 and s11 should be added the function +% defines them to be = 0 by default. Otherwise, the coefficients can be +% defined manually by adding the 'coeff'-parameter, which should be a +% [1x4]-vector (coeff = [c00 c10 c11 s11]). 
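+%
+% (Illustrative sketch: for a Colombo-ordered coefficient vector K of maximum
+% degree Lmax, i.e. with (Lmax+1)^2 elements, K_ = mod01(K,'rem') returns a
+% vector of (Lmax+1)^2 - 4 elements, and mod01(K_,'add') restores the full
+% length with zeros (or the values given in coeff) in the four low-degree
+% positions.)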
+% +% K_ = mod01(K,tpe) +% K_ = mod01(K,tpe,coeff) +% +% Input: K [n x m] input data +% tpe [str] string variable which defines what +% the function should do: +% - 'rem' -> Removes the c00, c10, c11 and +% s11 coefficients +% - 'add' -> adds zeros (or the values in the +% coeff-variable) as c00, c10, c11 +% and s11 coefficients +% - 'rep' -> replaces the c00, c10, c11 +% and s11 coefficients with the +% zeros or the values in the +% coeff-variable +% coeff [4 x 1] optional vector with c00, c10, c11 and s11 +% coefficients +% +% Output: K_ [u x v] output data +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 4.12.2008 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + + +if nargin < 3 + coeff = 0; +end + +s = size(K); + +if strcmp(tpe, 'rem') + if s(1,2) == 1 + Lmax = sqrt(s(1,1))-1; + format = 1; + elseif s(1,1) == 1 + K = K'; + Lmax = sqrt(s(1,1))-1; + format = 2; + elseif s(1,1) == s(1,2) && s(1,1) > 1 + Lmax = sqrt(s(1,1))-1; + format = 3; + elseif s(1,1) ~= s(1,2) && s(1,1) > 1 && s(1,2) > 1 + Lmax = sqrt(s(1,2))-1; + format = 4; + else error('Input has an unknown input format') + end + + % Determination of the elements before and after the c00, c10, c11 and c20 + % coefficients + a = Lmax+1; + b = Lmax+3; + c = sum(1:a); + d = c+2; + + % Removing the coefficients and rearranging the input data + if format == 1 || format == 2 + K_ = [K(3:a); K(b:c); K(d:end)]; + if format == 2 + K_ = K_'; + end + elseif format == 3 + K_ = [K(3:a, 3:a) K(3:a, b:c) K(3:a, d:end); + K(b:c, 3:a) K(b:c, b:c) K(b:c, d:end); + K(d:end, 3:a) K(d:end, b:c) K(d:end,d:end)]; + elseif format == 4 + K_ = [K(:, 3:a) K(:, b:c) K(:, d:end)]; + end + +elseif strcmp(tpe, 'add') + if s(1,2) == 1 + Lmax = sqrt(s(1,1) + 4) - 1; + K = K'; + format = 1; + elseif s(1,1) == 1 + Lmax = sqrt(s(1,1) + 4) - 1; + format = 2; + elseif s(1,1) == s(1,2) && s(1,1) > 1 + Lmax = sqrt(s(1,1) + 4) - 1; + format = 3; + elseif s(1,1) ~= s(1,2) && s(1,1) > 1 + Lmax = sqrt(s(1,2) + 4) - 1; + format = 4; + else error('Input has an unknown input format') + end + + a = Lmax + 1; + b = Lmax + 3; + c = sum(1:a); + d = Lmax - 1; + + if format == 1 || format == 2 || format == 4 + if format == 1 || format == 2 + K_ = zeros(1, (Lmax+1)^2); + else + K_ = zeros(s(1), (Lmax+1)^2); + end + + if coeff + K_(:, 1:2) = coeff(1:2); + K_(:, a+1) = coeff(3); + K_(:, c+1) = coeff(4); + end + + K_(:, 3:a) = K(:, 1:a-2); + K_(:, b:c) = K(:, a-1:c-3); + K_(:, c+2:end) = K(:, c-2:end); + + if format == 1 + K_ = K_'; + end + + elseif format == 3 + K_ = zeros((Lmax+1)^2, (Lmax+1)^2); + K_(3:a, 3:a) = K(1:d, 1:d); + K_(b:c, 3:a) = K(Lmax:c-3, 1:d); + K_(c+2:end, 3:a) = K(c-2:end, 1:d); + + K_(3:a, b:c) = K(1:d, Lmax:c-3); + K_(b:c, b:c) = K(Lmax:c-3, Lmax:c-3); + K_(c+2:end, b:c) = K(c-2:end, Lmax:c-3); + + K_(3:a, c+2:end) = K(1:d, c-2:end); + K_(b:c, c+2:end) = K(Lmax:c-3, c-2:end); + K_(c+2:end, c+2:end) = K(c-2:end, c-2:end); + end + +elseif strcmp(tpe, 'rep') + if s(1,2) == 1 + Lmax = sqrt(s(1,1))-1; + format = 1; + elseif s(1,1) == 1 + K = K'; + Lmax = sqrt(s(1,1))-1; + format = 2; + elseif s(1,1) == s(1,2) && s(1,1) > 1 && s(1) > 500 + Lmax = sqrt(s(1,1))-1; + format = 3; + elseif s(1,1) ~= s(1,2) && s(1,1) > 1 && s(1,2) > 1 && 2*s(1,1) ~= s(1,2)+1 + Lmax = sqrt(s(1,2))-1; + format = 4; + elseif s(1,1) == s(1,2) && s(1,1) > 1 && s(1) < 501 + Lmax = s(1) - 1; + format = 5; + elseif 2*s(1,1) 
== s(1,2)+1 + Lmax = s(1) - 1; + format = 6; + else error('Input has an unknown input format') + end + + a = Lmax + 2; + b = sum(1:a)+1; + + if coeff == 0 + coeff = [0 0 0 0]; + end + + if format == 1 || format == 2 + K_(1:2) = coeff(1:2); + K_(a) = coeff(3); + K_(b) = coeff(4); + + if format == 2 + K_ = K_'; + end + + elseif format == 3 + K_ = K; + tmp = zeros((Lmax+1)^2, 1); + K_(1:2, :) = [tmp'; tmp']; + K_(:, 1:2) = [tmp tmp]; + K_(:, a) = tmp; + K_(a, :) = tmp'; + K_(:, b) = tmp; + K_(b, :) = tmp'; + elseif format == 4 + K_ = K; + tmp = zeros((Lmax+1)^2, 1); + K_(:, 1:2) = [tmp tmp]; + K_(:, a) = tmp; + K_(:, b) = tmp; + elseif format == 5 + K_ = K; + K_(1:2, 1:2) = [coeff(1) coeff(4); coeff(2) coeff(3)]; + elseif format == 6 + K_ = K; + K_(1, a-1) = coeff(1); + K_(2, a-2:a) = coeff(2:4); + end +end + + + + diff --git a/mtit.m b/mtit.m new file mode 100644 index 0000000..f067830 --- /dev/null +++ b/mtit.m @@ -0,0 +1,164 @@ +%MTIT creates a major title in a figure with many axes +% +% MTIT +% - creates a major title above all +% axes in a figure +% - preserves the stack order of +% the axis handles +% +%SYNTAX +%------------------------------------------------------------------------------- +% P = MTIT(TXT,[OPT1,...,OPTn]) +% P = MTIT(FH,TXT,[OPT1,...,OPTn]) +% +%INPUT +%------------------------------------------------------------------------------- +% FH : a valid figure handle [def: gcf] +% TXT : title string +% +% OPT : argument +% ------------------------------------------- +% xoff : +/- displacement along X axis +% yoff : +/- displacement along Y axis +% zoff : +/- displacement along Z axis +% +% title modifier pair(s) +% ------------------------------------------- +% TPx : TVx +% see: get(text) for possible +% parameters/values +% +%OUTPUT +%------------------------------------------------------------------------------- +% par : parameter structure +% .pos : position of surrounding axis +% .oh : handle of last used axis +% .ah : handle of invisible surrounding axis +% .th : handle of main title +% +%EXAMPLE +%------------------------------------------------------------------------------- +% subplot(2,3,[1 3]); title('PLOT 1'); +% subplot(2,3,4); title('PLOT 2'); +% subplot(2,3,5); title('PLOT 3'); +% axes('units','inches',... +% 'color',[0 1 .5],... +% 'position',[.5 .5 2 2]); title('PLOT 41'); +% axes('units','inches',... +% 'color',[0 .5 1],... +% 'position',[3.5 .5 2 2]); title('PLOT 42'); +% shg; +% p=mtit('the BIG title',... +% 'fontsize',14,'color',[1 0 0],... +% 'xoff',-.1,'yoff',.025); +% % refine title using its handle +% set(p.th,'edgecolor',.5*[1 1 1]); + +% created: +% us 24-Feb-2003 / R13 +% modified: +% us 24-Feb-2003 / CSSM +% us 06-Apr-2003 / TMW +% us 13-Nov-2009 17:38:17 + +%------------------------------------------------------------------------------- +function par=mtit(varargin) + + defunit='normalized'; + if nargout + par=[]; + end + +% check input + if nargin < 1 + help(mfilename); + return; + end + if isempty(get(0,'currentfigure')) + disp('MTIT> no figure'); + return; + end + + vl=true(size(varargin)); + if ischar(varargin{1}) + vl(1)=false; + figh=gcf; + txt=varargin{1}; + elseif any(ishandle(varargin{1}(:))) &&... 
+ ischar(varargin{2}) + vl(1:2)=false; + figh=varargin{1}; + txt=varargin{2}; + else + error('MTIT> invalid input'); + end + vin=varargin(vl); + [off,vout]=get_off(vin{:}); + +% find surrounding box + ah=findall(figh,'type','axes'); + if isempty(ah) + disp('MTIT> no axis'); + return; + end + oah=ah(1); + + ou=get(ah,'units'); + set(ah,'units',defunit); + ap=get(ah,'position'); + if iscell(ap) + ap=cell2mat(get(ah,'position')); + end + ap=[ min(ap(:,1)),max(ap(:,1)+ap(:,3)),... + min(ap(:,2)),max(ap(:,2)+ap(:,4))]; + ap=[ ap(1),ap(3),... + ap(2)-ap(1),ap(4)-ap(3)]; + +% create axis... + xh=axes('position',ap); +% ...and title + th=title(txt,vout{:}); + tp=get(th,'position'); + set(th,'position',tp+off); + set(xh,'visible','off','hittest','on'); + set(th,'visible','on'); + +% reset original units + ix=find(~strcmpi(ou,defunit)); + if ~isempty(ix) + for i=ix(:).' + set(ah(i),'units',ou{i}); + end + end + +% ...and axis' order + uistack(xh,'bottom'); + axes(oah); %#ok + + if nargout + par.pos=ap; + par.oh=oah; + par.ah=xh; + par.th=th; + end +end +%------------------------------------------------------------------------------- +function [off,vout]=get_off(varargin) + +% search for pairs <.off>/ + + off=zeros(1,3); + io=0; + for mode={'xoff','yoff','zoff'}; + ix=strcmpi(varargin,mode); + if any(ix) + io=io+1; + yx=find(ix); + ix(yx+1)=1; + off(1,io)=varargin{yx(end)+1}; + varargin=varargin(xor(ix,1)); + end + end + vout=varargin; +end +%------------------------------------------------------------------------------- \ No newline at end of file diff --git a/mtrxerrs.m b/mtrxerrs.m new file mode 100644 index 0000000..7a4ff80 --- /dev/null +++ b/mtrxerrs.m @@ -0,0 +1,122 @@ +function errs = mtrxerrs(fld1, fld2, quant, thresh, tscale) + +if nargin < 5, tscale = 'monthly'; end +if nargin < 4, thresh = -inf; end +if nargin < 3, quant = 'rmse'; end + + +if strcmp(tscale, 'monthly') + % Select only simultaneous time-steps + [fld1, fld2] = find_sim_tspts(fld1, fld2); + + fld1 = fld1(2:end, 4:end); + fld2 = fld2(2:end, 4:end); + +elseif strcmp(tscale, 'mean_monthly') + + fld1 = fld1(2:end, 2:end); + fld2 = fld2(2:end, 2:end); + +end + +fld1(fld1 < thresh) = NaN; +fld2(fld2 < thresh) = NaN; + + +errs = fld1 - fld2; +nts = length(fld1(:,1)); + +switch quant + + % 1. Absolute errors + case 'ae' + errs = abs(errs); + + % 2. Mean absolute errors + case 'mae' + errs = abs(errs); + errs = nanmean(errs); + + % 3. Squared errors + case 'se' + errs = errs.^2; + + % 4. Mean Squared errors + case 'mse' + errs = errs.^2; + errs = nanmean(errs); + + % 5. Root mean squared errors + case 'rmse' + errs = errs.^2; + errs = nanmean(errs); + errs = sqrt(errs); + + % 6. Relative errors + case 're' + errs = errs./fld1; + + % 7. Absolute relative errors + case 'are' + errs = errs./fld1; + errs = abs(errs); + + % 8. Mean absolute relative errors + case 'mare' + fld1(fld1 == 0) = NaN; + errs = abs(errs); + errs = errs./fld1; + errs = nanmean(errs); + + case 'mare_2' + errs = abs(errs); + errs = nanmean(errs); + errs = errs./(max(fld1) - min(fld1)); + + % 9. Squared relative errors + case 'sre' + errs = errs./fld1; + errs = errs.^2; + + % 10. Mean squared relative errors + case 'msre' + errs = errs./fld1; + errs = errs.^2; + errs = nanmean(errs); + + % 11. Root mean squared relative erros + case 'rmsre' + errs = errs./fld1; + errs = errs.^2; + errs = nanmean(errs); + errs = sqrt(errs); + + % 12. 
Normalized root mean square deviation + case 'nrmse' + errs = errs.^2; + errs = nanmean(errs); + errs = sqrt(errs); + errs = errs./(max(fld1) - min(fld1)); + + % 13. Coefficient of variation of the RMSE + case 'cvrmse' + errs = errs.^2; + errs = nanmean(errs); + errs = sqrt(errs); + errs = errs./nanmean(fld1); + + case 'cvmae' + errs = abs(errs); + errs = nanmean(errs); + errs = errs./nanmean(fld1); + + % 13. Coefficient of variation of the RMSE + case 'corr' + errs = nancorr(fld1, fld2); + +end + + + + + diff --git a/nancorr.m b/nancorr.m new file mode 100644 index 0000000..79f313a --- /dev/null +++ b/nancorr.m @@ -0,0 +1,24 @@ +function C = nancorr(X, Y) +% NANCORR calculates the sample correlation coefficient +% for the series with NaNs expected. +% X is the one series, Y is another. + +[r1, c1] = size(X); +[r2, c2] = size(Y); + +if r1 ~= r2 | c1 ~= c2 + error('The samples must be of the same size') +end + +% Set the missing values to NaN +Y(isnan(X)) = NaN; +X(isnan(Y)) = NaN; + +% Compute the mean +Xm=nanmean(X); +Ym=nanmean(Y); + +num = nansum((X - ones(r1, 1)*Xm).*(Y - ones(r1, 1)*Ym)); +den = nansum((X - ones(r1, 1)*Xm).^2).*nansum((Y - ones(r1, 1)*Ym).^2); + +C = num./sqrt(den); \ No newline at end of file diff --git a/nanrmse.m b/nanrmse.m new file mode 100644 index 0000000..f61b464 --- /dev/null +++ b/nanrmse.m @@ -0,0 +1,22 @@ +function R = nanrmse(fld1, fld2); + + +fld1(isnan(fld2)) = NaN; + +if size(fld2) > 1 + fld2(isnan(fld1)) = NaN; +end + + +mask = zeros(size(fld1)); + +mask(~isnan(fld1)) = 1; +nr_obs = sum(mask); + + +R = sqrt(1./nr_obs.*nansum((fld1 - fld2).^2)); + + + + + diff --git a/netcdf2mat.m b/netcdf2mat.m new file mode 100755 index 0000000..e3e6603 --- /dev/null +++ b/netcdf2mat.m @@ -0,0 +1,97 @@ +function otpt = netcdf2mat(flist, date, var_ids); + +% ------------------------------------------------------------------------- +% The function reads one or multiple netcdf-files and stores the variables +% in a cell-array. The names of the files to be read must be provided in +% the file flist. As the date of the datasets is stored as well, a start- +% and end-date can be provided. Furthermore, if the netcdf-files contain +% multiple variables, the desired variables can be selected with the +% parameter var_ids, which is a vector containing the variable ids of the +% variables which shall be stored. +% ------------------------------------------------------------------------- +% Note: At present, the program supports only monthly input. In a future +% version, support for arbitrary timesteps will be implemented. 
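+% Example (hypothetical file list and variable ids, shown only to
+% illustrate the expected input format):
+%   otpt = netcdf2mat('prec_files.txt', [1989 1 2006 12], [3 4]);
+%   reads every file listed in prec_files.txt, assigns the months 01/1989
+%   to 12/2006 to the rows and stores netcdf variables 3 and 4 next to
+%   the month/year columns.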
+% ------------------------------------------------------------------------- +% Input: flist 'string' File which contains the names of the +% netcdf-files to be read +% date [1 x 4] Vector which contains start- and +% end-date of the dataset in the +% following arrangement: +% [start_year start_month end_year end_month] +% var_ids [1 x n] vector which contains the variable ids of +% the variables which shall be read +% +% Output: otpt [m x n+2] cell-structure which contains the month and +% year of a specific dataset in the first two +% columns and the desired data +% +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% September 2010 +% ------------------------------------------------------------------------- +% Uses: netcdf-toolbox +% ------------------------------------------------------------------------- + + +files = importdata(flist); + + + +syear = date(1); +smonth = date(2); +eyear = date(3); +emonth = date(4); + + +month = smonth; +year = syear; +nr_files = 0; + +i = 0; +flg = 0; + +while flg == 0 + i = i + 1; + + if month == emonth & year == eyear + flg = 1; + end + + date_vec(i,1:2) = [month year]; + + month = month + 1; + + if month == 13 + month = 1; + year = year + 1; + end + + + nr_files = nr_files + 1; + + + +end +keyboard +if nr_files ~= length(files) + error('Time domain does not match with number of files'); +end + + +for i = 1:length(files) + ncid = netcdf.open(files{i}, 'nowrite'); + + otpt{i,1} = date_vec(i,1); + otpt{i,2} = date_vec(i,2); + + for j = 1:length(var_ids) + otpt{i,j+2} = netcdf.getVar(ncid, var_ids(j)); + end + + netcdf.close(ncid); +end + + + + + \ No newline at end of file diff --git a/ns_coeff.m b/ns_coeff.m new file mode 100644 index 0000000..3bd983c --- /dev/null +++ b/ns_coeff.m @@ -0,0 +1,25 @@ +function E = ns_coeff(obs, mdl, tscale) + +if nargin < 3, tscale = 'monthly'; end + +if strcmp(tscale, 'monthly') + + [obs, mdl] = find_sim_tspts(obs, mdl); + + obs = obs(2:end, 4:end); + mdl = mdl(2:end, 4:end); + +elseif strcmp(tscale, 'mean_monthly') + obs = obs(2:end, 2:end); + mdl = mdl(2:end, 2:end); +end + + +d1 = (obs - mdl).^2; +d2 = (obs - ones(size(obs, 1), 1)*nanmean(obs, 1)).^2; + + +E = 1 - nansum(d1)./nansum(d2); + + +% keyboard \ No newline at end of file diff --git a/outlierdect.m b/outlierdect.m new file mode 100644 index 0000000..b60a710 --- /dev/null +++ b/outlierdect.m @@ -0,0 +1,18 @@ +function flags = outlierdect(ts, kappa, tau) + + +flags = zeros(length(ts)); + +for i = 1+kappa:length(ts)-kappa + tmp = median(ts(i-kappa:i+kappa)); + + if ts(i) - tmp < tau + flags(i) = 0; + else + flags(i) = 1; + end +end + + + + diff --git a/pdfplot.m b/pdfplot.m new file mode 100644 index 0000000..78a342e --- /dev/null +++ b/pdfplot.m @@ -0,0 +1,40 @@ +function pdfplot(xin, nbins) +% pdfplot(X, nbins) +% displays a histogram of the empirical probability density function (PDF) +% for the data in the input array X using nbins number of bins +% (by default pdfplot sets nbins to 20). +% If input X is a matrix, then pdfplot(X) parses it to the vector and +% displays PDF of all values. +% For complex input X, pdfplot(X) displays PDF of abs(X). 
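+% The bar heights are normalised by the total number of samples (they
+% sum to one), so the plot shows relative frequencies rather than raw
+% counts.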
+% +% Example: +% y = randn( 1, 1e5 ); +% pdfplot( y ); +% pdfplot( y, 100 ); + +% Version 1.0 +% Alex Bur-Guy, September 2003 +% alex@wavion.co.il +% +% Revisions: +% Version 1.0 - initial version + +if nargin == 1, nbins = 20; end +xin = reshape( xin, numel(xin), 1 ); +if ~isreal( xin ), xin = abs( xin ); end +minXin = min(xin); maxXin = max(xin); +if floor( nbins ) ~= nbins, error( 'Number of bins should be integer value' ); end +if nbins < 2, error( 'Number of bins should be positive integer greater than 1 ' ); end +figure; +if minXin == maxXin + bar(minXin,1); + axis([minXin - 10, minXin + 10, 0, 1]); +else + step = (maxXin - minXin) / (nbins-1); + binc = minXin : step : maxXin; + [N, X] = hist(xin, binc); + bar(X, N/sum(N)); +end +xlabel('X', 'FontWeight','b','FontSize',12); +title(['PDF(X) based on ' num2str(length(xin)) ' data samples @ ' num2str(nbins) ' bins'], 'FontWeight','b','FontSize',12); +grid on; diff --git a/plfit.m b/plfit.m new file mode 100644 index 0000000..d0e2c9a --- /dev/null +++ b/plfit.m @@ -0,0 +1,92 @@ +function [a, b] = plfit(x,y,w,xmin,xmax) + +% This function computes the parameters a and b of a power law y = a*x^b. +% Therefore, the input data and the computation points are first +% transformed to the logarithmic scale, where a power law can be +% approximated by a straight line according to +% +% y(x) = a*x^b -> log10(y) = log10(a*x^b) = log10(a) + k*log10(x) +% with log10(x) = u and log10(y) = v +% v = b*u + log10(a) +% +%-------------------------------------------------------------------------- +% Input: x [n x 1] computation points +% y [m x 1] data +% w [m x 1] weights (if no weights should be applied, +% w = 1), (default) +% +% Output xht [2 x 1] Estimated parameters of the power law where +% xht = [a b]' and A = exp(a) +% yht [m x 1] Adjusted observations +% sig [2 x 1] Standard deviations of the estimated parameters +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: September 2008 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- +% Updates: - 11.10.2011: Added the parameter w to do a weighted estimation +% Added some help text. 
(CL) + + +% Checking the input arguments +if length(x) ~= length(y) + error('X and Y must have the same size!') +end + +if size(x,2) > 1; x = x'; end +if size(y,2) > 1; y = y'; end + +if nargin < 3 + w = 1; +elseif length(w) ~= length(y) & size(w) ~= 1 + error('Y and W must have the same size!') +elseif size(w) == 1 + w = ones(length(y)); +end + +sind = find(x == xmin) +if nargin < 5 + [tmp, eind] = min(y(sind+1:end)); + eind +else + eind = find(x == xmax) +end + + + + +v = log10(y(sind:eind)); +u = log10(x(sind:eind)); + +% Data-size +n = length(u); + +% Set up the coefficient matrix +A = [ones(n, 1) u]; + +% Arrange the weights in a diagonal matrix (no correlations between the +% power-law coefficients are assumed) +P = diag(w(sind:eind)); + +% Computation of the normal matrix +N = inv(A'*P*A); + +% Estimation of the power-law parameters +xht = N*A'*P*v; + +% Compute the adjusted observations +yht = A*xht; + +% Compute the standard deviations of the estimated parameters +sig = sqrt(diag(N)); + +a = 10^xht(1); +b = xht(2); + + + + + + + diff --git a/plot_taylor.m b/plot_taylor.m new file mode 100644 index 0000000..0e60af2 --- /dev/null +++ b/plot_taylor.m @@ -0,0 +1,80 @@ +c_indx = [7 6 3 8 9 1]; + +fname{1} = 'P_taylor_NA'; +fname{2} = 'P_taylor_SA'; +fname{3} = 'P_taylor_E'; +fname{4} = 'P_taylor_AF'; +fname{5} = 'P_taylor_AS'; +fname{6} = 'P_taylor_AU'; + +clr{1} = 'k'; +clr{2} = 'm'; +clr{3} = 'c'; +clr{4} = 'b'; +clr{5} = 'r'; +clr{6} = 'g'; + +% for i = 1:6 +% for k = 1:2 + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + +% [hp ht axl] = taylordiag_new(sig_n{k,c_indx(i)}, E_n{k,c_indx(i)}, ... +% R{k,c_indx(i)}, 'colRMS', 'k', 'colSTD', 'k', ... +% 'colCOR', 'k', 'styleRMS', '-', 'tickSTD', ... +% 0:0.25:2, 'tickRMS', 0:0.25:1.25, 'limSTD', 2, ... +% 'titleCOR', 'Correlation', 'titleSTD', ' ', ... +% 'titleRMS', ' ', 'labeldta', 0, 'pointclr', clr ) + [hp ht axl] = taylordiag_new(sig_n_jan, E_n_jan, ... + R_jan, 'colRMS', 'k', 'colSTD', 'k', ... + 'colCOR', 'k', 'styleRMS', '-', 'tickSTD', ... + 0:0.25:2, 'tickRMS', 0:0.25:1.25, 'limSTD', 2, ... + 'titleCOR', 'Correlation', 'titleSTD', ' ', ... + 'titleRMS', ' ', 'labeldta', 0, 'pointclr', clr ) + + +% set(axl(1,1).handle, 'string', '\sigma_P', 'fontsize', 20) +% if i == 1 && k == 1 + yps = legend(hp(1,:), 'GPCC', 'CRU', 'CPC', 'ECMWF', 'MERRA', 'CFSR') + set(yps, 'fontsize', 20, 'location', 'southwest') + keyboard + print(h, '-depsc2', 'cont_mean_jan.eps') + + + h = figure('papersize', [4.5 4], 'paperunits', 'centimeters') + +% [hp ht axl] = taylordiag_new(sig_n{k,c_indx(i)}, E_n{k,c_indx(i)}, ... +% R{k,c_indx(i)}, 'colRMS', 'k', 'colSTD', 'k', ... +% 'colCOR', 'k', 'styleRMS', '-', 'tickSTD', ... +% 0:0.25:2, 'tickRMS', 0:0.25:1.25, 'limSTD', 2, ... +% 'titleCOR', 'Correlation', 'titleSTD', ' ', ... +% 'titleRMS', ' ', 'labeldta', 0, 'pointclr', clr ) + [hp ht axl] = taylordiag_new(sig_n_jul, E_n_jul, ... + R_jul, 'colRMS', 'k', 'colSTD', 'k', ... + 'colCOR', 'k', 'styleRMS', '-', 'tickSTD', ... + 0:0.25:2, 'tickRMS', 0:0.25:1.25, 'limSTD', 2, ... + 'titleCOR', 'Correlation', 'titleSTD', ' ', ... 
+ 'titleRMS', ' ', 'labeldta', 0, 'pointclr', clr ) + + +% set(axl(1,1).handle, 'string', '\sigma_P', 'fontsize', 20) +% if i == 1 && k == 1 +% yps = legend(hp(1,:), 'GPCC', 'CRU', 'CPC', 'ECMWF', 'MERRA', 'CFSR') +% set(yps, 'fontsize', 20, 'location', 'southwest') +% keyboard + print(h, '-depsc2', 'cont_mean_jul.eps') +% end +% % if k == 1 +% fnme = ['cont_mean_, '_Jan.eps']; +% elseif k == 2 +% fnme = [fname{i}, '_Jul.eps']; +% elseif k == 3 +% fnme = [fname{i}, '_JAS.eps']; +% elseif k == 4 +% fnme = [fname{i}, '_OND.eps']; +% % end +% print(h, '-depsc2', fnme) +% close all +% end +% end +% + \ No newline at end of file diff --git a/plot_taylor_t2.m b/plot_taylor_t2.m new file mode 100644 index 0000000..da15246 --- /dev/null +++ b/plot_taylor_t2.m @@ -0,0 +1,61 @@ +function [] = plot_taylor_t2(cind, time); +close all +if nargin < 2 + time = [1989 2006]; +end + +load /home/lorenz-c/Data/Catchment_agg/merra_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/cfsr01_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/ecmwf_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/cru_cagg_t2_sel.mat +load /home/lorenz-c/Data/Catchment_agg/del_cagg_t2_sel.mat + +r = find(cell2mat(merra_cagg_t2_sel(:,2)) == cind); + +ecmwf_sind = find(ecmwf_cagg_t2_sel{1,3}(:,1) == 1 & ... + ecmwf_cagg_t2_sel{1,3}(:,2) == time(1)); +ecmwf_eind = find(ecmwf_cagg_t2_sel{1,3}(:,1) == 12 & ... + ecmwf_cagg_t2_sel{1,3}(:,2) == time(2)); + +merra_sind = find(merra_cagg_t2_sel{1,3}(:,1) == 1 & ... + merra_cagg_t2_sel{1,3}(:,2) == time(1)); +merra_eind = find(merra_cagg_t2_sel{1,3}(:,1) == 12 & ... + merra_cagg_t2_sel{1,3}(:,2) == time(2)); + +cfsr_sind = find(cfsr_cagg_t2_sel{1,3}(:,1) == 1 & ... + cfsr_cagg_t2_sel{1,3}(:,2) == time(1)); +cfsr_eind = find(cfsr_cagg_t2_sel{1,3}(:,1) == 12 & ... + cfsr_cagg_t2_sel{1,3}(:,2) == time(2)); + +cru_sind = find(cru_cagg_t2_sel{1,3}(:,1) == 1 & ... + cru_cagg_t2_sel{1,3}(:,2) == time(1)); +cru_eind = find(cru_cagg_t2_sel{1,3}(:,1) == 12 & ... + cru_cagg_t2_sel{1,3}(:,2) == time(2)); + +del_sind = find(del_cagg_t2_sel{1,3}(:,1) == 1 & ... + del_cagg_t2_sel{1,3}(:,2) == time(1)); +del_eind = find(del_cagg_t2_sel{1,3}(:,1) == 12 & ... + del_cagg_t2_sel{1,3}(:,2) == time(2)); + +[R E sig] = taylor_stats(cru_cagg_t2_sel{r,3}(cru_sind:cru_eind,3), ... + del_cagg_t2_sel{r,3}(del_sind:del_eind,3), ... + ecmwf_cagg_t2_sel{r,3}(ecmwf_sind:ecmwf_eind,3), ... + merra_cagg_t2_sel{r,3}(merra_sind:merra_eind,3), ... + cfsr_cagg_t2_sel{r,3}(cfsr_sind:cfsr_eind,3)); + +[hp ht axl] = taylordiag(sig, E, R) + +set(ht(1), 'String', 'CRU') +set(ht(2), 'String', 'DEL') +set(ht(3), 'String', 'ECMWF') +set(ht(4), 'String', 'MERRA') +set(ht(5), 'String', 'CFSR') + +tlte = ['Averaged monthly temperature for ', merra_cagg_t2_sel{r,1}]; +title(tlte, 'fontsize', 14); + +filenme = ['/home/lorenz-c/Dokumente/Projektarbeit/Analysis/Precipitation/Timeseries/T2_Tlr_', ... 
+ merra_cagg_t2_sel{r,1}] + +print('-depsc', filenme); + \ No newline at end of file diff --git a/ploteofts.m b/ploteofts.m new file mode 100644 index 0000000..73b7f77 --- /dev/null +++ b/ploteofts.m @@ -0,0 +1,29 @@ +function F = ploteofts(pc, eof) + + +theta = 89.75:-0.5:-89.75; +lambda = -179.75:0.5:179.75; + +load coast + +scrsz = get(0,'ScreenSize'); +% figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/3 scrsz(4)/2]) +figure('OuterPosition',[1 scrsz(4)/4 scrsz(3)/3 scrsz(4)/4]) +% +% cx_max = max(pc)*max(max(eof)) +% cx_min = -min(pc)*min(min(eof)) + +for i = 1:732 +% inpt_map = pc(i)*eof; + inpt_map = 1*eof{i}; + imagesc(lambda, theta, inpt_map) + hold on + plot(long, lat, 'k', 'linewidth', 2) + pbaspect([2 1 1]) + axis xy + caxis([-50 50]) +% axis([-100 -50 -30 20]) + g = colorbar('eastoutside', 'fontsize', 14); + hold off + F(i) = getframe; +end diff --git a/plotglbl.m b/plotglbl.m new file mode 100644 index 0000000..323c928 --- /dev/null +++ b/plotglbl.m @@ -0,0 +1,33 @@ +function [f, g] = plotglbl(inpt_map, theta, lambda) + +if nargin < 3, lambda = -179.75:0.5:179.75; end +if nargin < 2, theta = 89.75:-0.5:-89.75; end + + +load coast + +scrsz = get(0,'ScreenSize'); +f = figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/3 scrsz(4)/2]) + +imagesc(lambda, theta, inpt_map) +hold on +plot(long, lat, 'k', 'linewidth', 1.5) +pbaspect([2 1 1]) +axis xy +g = colorbar('eastoutside', 'fontsize', 14); + +% +% colormap([160 0 200; +% 130 0 220; +% 30 60 255; +% 0 160 255; +% 0 200 200; +% 0 210 140 ; +% 0 220 0 ; +% 160 230 50; +% 230 220 50 ; +% 230 175 45 ; +% 240 130 40; +% 250 60 60; +% 240 0 130]/255); + diff --git a/plotglblts.m b/plotglblts.m new file mode 100644 index 0000000..771c43e --- /dev/null +++ b/plotglblts.m @@ -0,0 +1,29 @@ +function [] = plotglblts(inpt_map, cxs, unit, ttle) + +if nargin < 4 + ttle = ' '; +end + +if nargin < 3 + unit = '[mm/month]'; +end + +theta = 89.75:-0.5:-89.75; +lambda = -179.75:0.5:179.75; + +load coast + +scrsz = get(0,'ScreenSize'); +figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/3 scrsz(4)/2]) + +for i = 1:length(inpt_map) + imagesc(lambda, theta, inpt_map{i}) + hold on + plot(long, lat, 'k', 'linewidth', 1.5) + pbaspect([2 1 1]) + axis xy + caxis([cxs(1) cxs(2)]) + g = colorbar('eastoutside', 'fontsize', 14); + hold off + F(i) = getframe; +end \ No newline at end of file diff --git a/ploticas.m b/ploticas.m new file mode 100644 index 0000000..7fc86fe --- /dev/null +++ b/ploticas.m @@ -0,0 +1,57 @@ +function [] = ploticas(spatmaps, tseries, maxmde, tscale, axlims, theta, lambda) + +if nargin < 7, lambda = -179.75:0.5:179.75; end +if nargin < 6, theta = 89.75:-0.5:-89.75; end +if nargin < 5, axlims = [-180 180 -90 90]; end +if nargin < 4, tscale = 'monthly'; end +if nargin < 3, maxmde = length(spatmaps); end + + +if strcmp(tscale, 'monthly') + syr = input('Start year: \n') + smn = input('Start month: \n') + + for i = 1:length(tseries) + dte(i) = datenum(syr, smn, 15); + smn = smn + 1; + if smn == 13 + smn = 1; + syr = syr + 1; + end + end +elseif strcmp(tscale, 'annual') + syr = input('Start year: \n') + dte = syr:1:syr + length(tseries)-1; +end + + + +load coast +figure + +for i = 1:maxmde + subplot(2, maxmde, i); + imagesc(lambda, theta, spatmaps{i}); + axis xy + hold on + plot(long, lat, 'k', 'linewidth', 1.5); + axis(axlims); + pbaspect([length(axlims(1):axlims(2)), length(axlims(3):axlims(4)), 1]) + modenr = ['IC Mode ', num2str(i)]; + title(modenr) +end + + + + +for i = 1:maxmde + subplot(2, maxmde, maxmde + i); + plot(dte, tseries(:, i)); + if 
strcmp(tscale, 'monthly') + datetick('x', 'yyyy'); + end + axis([dte(1), dte(end), min(tseries(:,i)), max(tseries(:,i))]); + end + + + diff --git a/plotrgnl.m b/plotrgnl.m new file mode 100644 index 0000000..616be9b --- /dev/null +++ b/plotrgnl.m @@ -0,0 +1,118 @@ +function varargout = plotrgnl(nrplots, lims, dx, dy, pltrvrs, pltussts, area_mask, cxis, varargin) + +if isempty(dx) + lambda = -179.75:0.5:179.75; +else + lambda = -180+dx/2:dx:180-dx/2; +end + +if isempty(dy) + theta = 89.75:-0.5:-89.75; +else + theta = 90-dy/2:-dy:-90+dy/2; +end + + +if size(lims) == [1, 1] +% load indexfile3.asc +% mask = zeros(360, 720); + mask = zeros(size(area_mask)); + mask(area_mask == lims) = 1; + [r, c] = find(mask == 1); + mask(mask == 0) = NaN; + + lims = [min(lambda(c))-2 max(lambda(c))+2 ... + min(theta(r))-2 max(theta(r))+2]; +else + for i = 1:length(varargin) + mask{i} = ones(size(varargin{i})); + end +end + +if pltrvrs == 1 + S = shaperead('worldrivers.shp'); +end + +if pltussts == 1 + US = shaperead('/media/storage/Data/Masks/USA_States/states.shp'); +end + + +load coast + +scrsz = get(0, 'ScreenSize'); +% f = figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/3 scrsz(4)/2]); +f = figure('OuterPosition',[1 -scrsz(4)/3 scrsz(3)/2 scrsz(4)/2]); +if length(varargin) > 1 + for i = 1:length(varargin) + h(i) = subplot(nrplots(1), nrplots(2), i) + imagesc(lambda, theta, varargin{i}.*mask{i}) + hold on + plot(long, lat, 'k', 'linewidth', 0.2) + if pltrvrs == 1 + for j = 1:128 + plot(S(j).X, S(j).Y, 'b', 'linewidth', 0.2) + end + end + if pltrvrs == 1 + for j = 1:128 + plot(S(j).X, S(j).Y, 'b', 'linewidth', 0.2) + end + end + if pltussts == 1 + for i = 1:49 + plot(US(i).X, US(i).Y, 'k', 'linewidth', 0.2) + end + end + hold off + + axis xy + axis([lims(1) lims(2) lims(3) lims(4)]) + pbaspect([abs(lims(1) - lims(2)) abs(lims(3) - lims(4)) 1]); + caxis([cxis(1) cxis(2)]) + + % g(i) = colorbar('eastoutside', 'fontsize', 10); + +% xlabel('Longitude', 'fontsize', 12) +% ylabel('Latitude', 'fontsize', 12) + end +else + h = subplot(1,1,1); + imagesc(lambda, theta, varargin{1}) + hold on + plot(long, lat, 'k', 'linewidth', 1.5) + if pltrvrs == 1 + for i = 1:128 + plot(S(i).X, S(i).Y, 'b', 'linewidth', 1.5) + end + end + if pltussts == 1 + for i = 1:49 + plot(US(i).X, US(i).Y, 'k', 'linewidth', 1.5) + end + end +% hold off + + axis xy + axis([lims(1) lims(2) lims(3) lims(4)]) + pbaspect([abs(lims(1) - lims(2)) abs(lims(3) - lims(4)) 1]); + caxis([cxis(1) cxis(2)]) + + g = colorbar('eastoutside', 'fontsize', 12); + xlabel('Longitude', 'fontsize', 12) + ylabel('Latitude', 'fontsize', 12) +end + +varargout{1} = f; +varargout{2} = h; +%varargout{3} = g; + + + + + + + + + + diff --git a/plotspec.m b/plotspec.m new file mode 100644 index 0000000..b07e53e --- /dev/null +++ b/plotspec.m @@ -0,0 +1,47 @@ +function [] = plotspec(type, f, ax, varargin) + + +if isempty(type) + type = 'Amp'; +end + +clr = [ 0 0 0; + 30 60 255; + 250 60 60; + 0 220 0; + 240 130 40; + 0 200 200; + 230 220 50; + 160 0 200; + 160 230 50; + 0 160 255; + 240 0 130; + 230 175 45; + 0 210 140; + 130 0 220]/255; + + +figure + +for i = 1:length(varargin) + + spectrm = varargin{i}; + if strcmp(ax, 'lin') + plot(f, spectrm, 'Color', clr(i,:), 'linewidth', 1) + elseif strcmp(ax, 'log') + semilogy(f, spectrm, 'Color', clr(i,:), 'linewidth', 1) + end + hold on +end + +xlabel('Frequency', 'fontsize', 12); + +if strcmp(type, 'Amp') + ylabel('|Y(f)|', 'Fontsize', 12); + title('Single-sided amplitude spectrum', 'fontsize', 12); +elseif strcmp(type, 'PSD') + 
ylabel('Power/Frequency', 'fontsize', 12); + title('Power spectral density', 'fontsize', 12); +end + + \ No newline at end of file diff --git a/plwmtrx.m b/plwmtrx.m new file mode 100644 index 0000000..6a44866 --- /dev/null +++ b/plwmtrx.m @@ -0,0 +1,76 @@ +function K = plwmtrx(a, b, lmx, otpt, quant) + +% The function generates (inverse) degree variances or (inverse) rms-values +% from an estimated polynomial fit signal covariance with the coefficients +% p. The output can be arranged in any format (matrix, vector, sc, cs or +% only the coefficients for the desired oder m). +%-------------------------------------------------------------------------- +% Input: p [1 x k] vector with the power law parameters +% (e.g. p = [a b] for a fitted power law) +% +% lmx [1 x 1] maximal degree of expansion +% (default: lmx = 60) +% otpt Defines the output format: +% 'cs' -> K is a (lmax+1) x (lmax+1) matrix in +% cs-format +% 'sc' -> K is a (lmax+1) x (2*lmax + 1) matrix +% in sc-format +% 'vec' -> K is a (lmax+1)^2 x 1 vector +% quant Defines the output quantity +% 'dv' -> K contains degree variances +% 'dv_1' -> K contains inverse squared degree +% variances +% 'rms' -> K contains rms-values +% 'rms_1' -> K contains inverse squared rms-values +% default: otpt = 'dv' +% Output: K [n x n] coefficients of the estimated signal covariance +% in the desired output format +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 4.12.2009 +%-------------------------------------------------------------------------- +% Uses: mat2vec.m, sc2cs.m, cs2sc.m +%-------------------------------------------------------------------------- +% Updates: - 11.10.2011: Code brush up (CL) + +if nargin < 2 + lmx = 60; +end + +if nargin < 3 + otpt = 'mat' +end + +if nargin < 5 + quant = []; +end + +% Creating a vector with logarithmic degree values +l = standing(0:lmx); + +% Evaluation of the polynomial at the computation points l +tmpsig = a*l.^b; + +% Compute the desired output quantity +if strcmp(quant, 'dv_1') % Inverse degree variances + tmpsig = 1./tmpsig; +elseif strcmp(quant, 'rms') % Degree-rms + tmpsig = sqrt(tmpsig./(2*l+1)); +elseif strcmp(quant, 'rms_1') % Inverse (squared) degree-rms + tmpsig = 1./(tmpsig./(2*l+1)); +end + + +% Adding zeros as deg. 0 and 1 values and rearange the coefficients in the +% s/c-format +tmpsig = repmat([0; 0; tmpsig(3:end)], 1, 2*lmx+1).* ... 
+ [fliplr(tril(ones(lmx+1, lmx), -1)) tril(ones(lmx+1, lmx+1), 0)]; + +% Rearranging the values into the desired output format +if strcmp(otpt, 'vec') + K = mat2vec(tmpsig); +elseif strcmp(otpt, 'cs') + K = sc2cs(tmpsig); +elseif strcmp(otpt, 'sc') + K = tmpsig; +end diff --git a/prep_cfsr_data.m b/prep_cfsr_data.m new file mode 100644 index 0000000..2112023 --- /dev/null +++ b/prep_cfsr_data.m @@ -0,0 +1,41 @@ +function otpt = prep_cfsr_data(fname, quant, nme) + +mnth = 1; +year = 1979; +tmp = nj_varget(fname, quant); + +if size(tmp,2) == 361 + grd_flg = 1; +elseif size(tmp,2) == 360 + grd_flg = 0; +end +for i = 1:size(tmp,1) + + otpt{i,1} = 'CFSR'; + otpt{i,2} = nme; + otpt{i,3} = 1; + otpt{i,4} = mnth; + otpt{i,5} = year; + otpt{i,6} = 'Global'; + otpt{i,7} = 89.75:-0.5:-89.75; + otpt{i,8} = -179.75:0.5:179.75; + + otpt{i,9} = shiftdim(tmp(i,:,:)); + + if grd_flg == 1 + otpt{i,9} = (otpt{i,9}(1:end-1,:) + otpt{i,9}(2:end,:))/2; + otpt{i,9} = (otpt{i,9} + [otpt{i,9}(:, 2:end) otpt{i,9}(:, 1)])/2; + end + + otpt{i,9} = [otpt{i,9}(:, 361:end) otpt{i,9}(:,1:360)]; + + + + mnth = mnth + 1; + if mnth == 13 + mnth = 1; + year = year + 1; + end + + +end \ No newline at end of file diff --git a/prep_data.m b/prep_data.m new file mode 100644 index 0000000..7c2fa85 --- /dev/null +++ b/prep_data.m @@ -0,0 +1,11 @@ +function otpt = prep_data(inpt, time, clms, mval); + + +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); + +sind = find(mnths == 1 & yrs == time(1)); +eind = find(mnths == 12 & yrs == time(2)); + + +otpt = inpt(sind:eind, clms(1:3)); \ No newline at end of file diff --git a/prep_grace.m b/prep_grace.m new file mode 100644 index 0000000..b06a318 --- /dev/null +++ b/prep_grace.m @@ -0,0 +1,128 @@ +function [otpt, mn_pwr] = prep_grace(inpt, type, clms, seyrs) + +% Pre-processing of GRACE-data. This function can be used to e.g. compute +% the mean over a given time-period, to remove a trend or compute the +% derivative of the GRACE coefficients. It also removes the degree +% 0/1-coefficients from the data. 
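+% Example (illustrative call only; grace_data is assumed to be a cell
+% array with the column layout described below):
+%   [grace_anom, mn_pwr] = prep_grace(grace_data, 'mean', [5 4 9 10], [2003 2010]);
+%   removes the 2003-2010 temporal mean from the monthly coefficients and
+%   propagates the formal errors of the mean onto the residual coefficients.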
+%-------------------------------------------------------------------------- +% Input: inpt {n x m} Cell-array containing +% month/year/signal/error +% +% type 'string' Defines the type of preprocessing: +% 'mean' -> mean is removed +% 'trend' -> trend is removed (TBI) +% 'derr' -> derivatives are computed (TBI) +% clms [1 x 4] columns in inpt containing month, year, +% signal and errors +% seyrs [1 x 2] start- and end-year of the considered +% time-series +% +% Output: otpt {n x 4} pre-processed GRACE data containing +% month/year/signal/error +%-------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: October 20011 +%-------------------------------------------------------------------------- +% Uses: mod01.m +%-------------------------------------------------------------------------- + +if nargin < 4 + seyrs = [2003 2010]; +end + +if nargin < 3 + clms = [5 4 9 10]; +end + +if nargin < 2 + type = 'mean'; +end + + +mnth = cell2mat(inpt(:, clms(1))); +yr = cell2mat(inpt(:, clms(2))); + +s_ind = find(mnth == 1 & yr == seyrs(1)); +e_ind = find(mnth == 12 & yr == seyrs(2)); + +mnth = mnth(s_ind:e_ind); +yr = yr(s_ind:e_ind); + +n = length(mnth); + +signal = inpt(s_ind:e_ind, clms(3)); +error = inpt(s_ind:e_ind, clms(4)); + +maxdeg = size(signal{1}, 1)-1; + + +if strcmp(type, 'mean') + + mnth = cell2mat(inpt(:, clms(1))); + yr = cell2mat(inpt(:, clms(2))); + + s_ind = find(mnth == 1 & yr == seyrs(1)); + e_ind = find(mnth == 12 & yr == seyrs(2)); + + mnth = mnth(s_ind:e_ind); + yr = yr(s_ind:e_ind); + + n = length(mnth); + + signal = inpt(s_ind:e_ind, clms(3)); + error = inpt(s_ind:e_ind, clms(4)); + + + + mn_sig = zeros(size(signal{1})); + mn_err = zeros(size(error{1})); + + for i = 1:n + mn_sig = mn_sig + signal{i}; + mn_err = mn_err + error{i}.^2; + end + + mn_sig = mn_sig/n; + mn_err = mn_err/(n^2); + + mnth = cell2mat(inpt(:, clms(1))); + yr = cell2mat(inpt(:, clms(2))); + + n = length(mnth); + + signal = inpt(1:end, clms(3)); + error = inpt(1:end, clms(4)); + + + + + mn_pwr = zeros(maxdeg+1,2); + + for i = 1:n + + otpt{i,1} = mnth(i); + otpt{i,2} = yr(i); + tmp = signal{i} - mn_sig; + otpt{i,3} = mod01(tmp, 'rep'); + tmp = sqrt(error{i}.^2 + mn_err); + otpt{i,4} = mod01(tmp, 'rep'); + + otpt{i,5} = degvar(otpt{i,3}, maxdeg, 0, 'none', 0, 0); + otpt{i,6} = degvar(otpt{i,4}, maxdeg, 0, 'none', 0, 0); + + mn_pwr(:,2) = mn_pwr(:,2) + log10(otpt{i,5}(:,2)); + + + end + mn_pwr(:,1) = standing(0:maxdeg); + mn_pwr(:,2) = mn_pwr(:,2)/n; + + + end + + + + + + + \ No newline at end of file diff --git a/prep_grace_cov.m b/prep_grace_cov.m new file mode 100644 index 0000000..fe0f01e --- /dev/null +++ b/prep_grace_cov.m @@ -0,0 +1,67 @@ +function [ ] = prep_grace_cov(inames, type, Q_mean, onames) + +% Pre-processing of GRACE-covariance matrices. This function can be used +% to e.g. compute the mean over a given time-period or compute the +% derivative of the GRACE coefficients. The mean is computed according to +% the laws of error propagation +%-------------------------------------------------------------------------- +% Input: fnames 'cell' List of filenames which should be +% loaded and preprocessed. 
+% +% type 'string' Defines the type of preprocessing: +% 'mean' -> mean is removed +% 'derr' -> derivatives are computed (TBI) +% +% Output: otpt {n x 4} pre-processed GRACE data containing +% month/year/signal/error +%-------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: October 20011 +%-------------------------------------------------------------------------- +% Uses: mod01.m +%-------------------------------------------------------------------------- + +if nargin < 2 + type = 'mean'; +end + + +n = length(inames); + +load(Q_mean); + +if isstruct(Q_mean) + Q_mean = [Q_mean.NW Q_mean.SW'; Q_mean.SW Q_mean.SE]; +end + +hwb = waitbar(0,'Percentage of covariance matrices processed ...'); +set(hwb,'NumberTitle','off','Name','Covariance preprocessing ') + +for i = 1:n + + load(inames{i}); + + if isstruct(Q) + Qn = [Q.NW Q.SW'; Q.SW Q.SE] + Q_mean; + else + Qn = Q + Q_mean; + end + + clear Q + + Q.NW = Qn(1:1891, 1:1891); + Q.SW = Qn(1892:end, 1:1891); + Q.SE = Qn(1892:end, 1892:end); + + save(onames{i}, 'Q'); + + waitbar((i)/(n)) + +end + +close(hwb) + + + + + \ No newline at end of file diff --git a/quartile.m b/quartile.m new file mode 100644 index 0000000..e696fd0 --- /dev/null +++ b/quartile.m @@ -0,0 +1,42 @@ +% +% Copyright (C) 2011-2012 Alex Bikfalvi +% +% This program is free software; you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either version 3 of the License, or (at +% your option) any later version. + +% This program is distributed in the hope that it will be useful, but +% WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% General Public License for more details. + +% You should have received a copy of the GNU General Public License +% along with this program; if not, write to the Free Software +% Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
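+% QUARTILE computes the three quartiles q1/q2/q3 of the data in x
+% (NaNs are ignored via nanmedian), the upper and lower whisker fences
+% fu/fl, i.e. the most extreme values within 1.5*IQR of q3/q1, and the
+% outliers ou/ol lying above/below those fences.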
+% + +function [q1 q2 q3 fu fl ou ol] = quartile(x) + +% rank the data +y = sort(x); + +% compute 50th percentile (second quartile) +q2 = nanmedian(y); + +% compute 25th percentile (first quartile) +q1 = nanmedian(y(y<=q2)); + +% compute 75th percentile (third quartile) +q3 = nanmedian(y(y>=q2)); + +% compute Interquartile Range (IQR) +IQR = q3-q1; + +fl = min(y(y>=q1-1.5*IQR)); +fu = max(y(y<=q3+1.5*IQR)); + +ol = y(yq3+1.5*IQR); + +end \ No newline at end of file diff --git a/random_kalman.m b/random_kalman.m new file mode 100644 index 0000000..eff2f3d --- /dev/null +++ b/random_kalman.m @@ -0,0 +1,42 @@ +% Kalman Filter - Random constant example +function [x, P, K, xs, Ps, yn] = random_kalman(R, yn) + + + + +t_v = 0.1; % True rms of the measurements +t_m = 0.5; % True constant value + +Q = 1e-5; % Process variance + + +x_start = 0; % First guess: x = 0; +P_start = 1; % initial error covariance is 1 + +if nargin < 1 + R = t_v^2; +end + +if nargin < 2 + yn = t_m + t_v*randn(50,1); % Computing the observations with mean t_m + % and standard deviation t_v +end + +for i = 1:50 + % Prediction step + if i == 1 + xs(i) = x_start; + Ps(i) = P_start + Q; + else + xs(i) = x(i-1); % Predict the state + Ps(i) = P(i-1) + Q; % Predict the error covariance + end + + % Correction step + K(i) = Ps(i)/(Ps(i) + R); % Computing the Kalman gain + x(i) = xs(i) + K(i)*(yn(i)-xs(i)); + P(i) = (1-K(i))*Ps(i); +end + + + diff --git a/read_cfsr.m b/read_cfsr.m new file mode 100644 index 0000000..4057cec --- /dev/null +++ b/read_cfsr.m @@ -0,0 +1,55 @@ +function cfsr = read_cfsr(fname, variable); + +ncRef = fname; +sprintf('Reading file %s', fname) +flds = nj_varget(fname, variable); +tme = nj_varget(ncRef, 'time'); +sprintf('...done') + +nr_timesteps = length(tme); + +year = 1979; +month = 1; + +for i = 1:nr_timesteps + + dte(i,1) = month; + dte(i,2) = year; + month = month + 1; + + if month == 13 + month = 1; + year = year + 1; + end + +end + + +if strcmp(variable, 'Total_precipitation') + fldnme = 'PREC'; + unit = '[mm/month]'; +elseif strcmp(variable, 'Temperature') + fldnme = 'T2'; + unit = '[°C]'; +end + +h = waitbar(0,'','Name','Reading progress...'); + +for i = 1:nr_timesteps + cfsr{i,9} = shiftdim(flds(i,:,:)); + cfsr{i,9} = resizem(cfsr{i,9}, [360, 720]); + cfsr{i,9} = [cfsr{i,9}(:, 361:end) cfsr{i,9}(:, 1:360)]; + cfsr{i,1} = 'CFSR'; + cfsr{i,2} = fldnme; + cfsr{i,3} = 1; + cfsr{i,4} = dte(i,1); + cfsr{i,5} = dte(i,2); + cfsr{i,6} = 'Global'; + cfsr{i,7} = 89.76:-0.5:-89.75; + cfsr{i,8} = -179.75:0.5:179.75; + cfsr{i,10} = unit; + waitbar(i/nr_timesteps, h, [int2str(i) '/' int2str(nr_timesteps) ' time-steps']) +end +close(h) + + diff --git a/read_cpc.m b/read_cpc.m new file mode 100644 index 0000000..39bd9f8 --- /dev/null +++ b/read_cpc.m @@ -0,0 +1,44 @@ +function cpc = read_cpc(fnames) +% The function reads a file from GPCC and stores its values and the +% appropriate deviations in a structure variable + + +h = waitbar(0,'','Name','...% datafiles processed'); +nr_files = length(fnames); + +cpc = []; + +for i = 1:nr_files + yr = str2num(fnames{i}(5:8)); + + ncid = netcdf.open(fnames{i}, 'nowrite'); + data = netcdf.getvar(ncid,3); + + for j = 1:12 + tmp{j,1} = 'CPC'; + tmp{j,2} = 'PREC'; + tmp{j,3} = 1; + tmp{j,4} = j; + tmp{j,5} = yr; + tmp{j,6} = 'Global'; + tmp{j,7} = 89.75:-0.5:-89.75; + tmp{j,8} = -179.75:0.5:179.75; + + tmp{j,9} = data(:,:,j); + tmp{j,9} = flipud(double(tmp{j,9}')); + tmp{j,9} = [tmp{j,9}(:, 361:end) tmp{j,9}(:,1:360)]; + + tmp{j,10} = '[mm/month]'; + + end + + cpc = [cpc; tmp]; + 
clear tmp + + waitbar(i/nr_files, h, [int2str((i*100)/nr_files) '%']) +end +close(h) + + + + diff --git a/read_del.m b/read_del.m new file mode 100644 index 0000000..51dfc2d --- /dev/null +++ b/read_del.m @@ -0,0 +1,62 @@ +function del = read_del(fnames) +% The function reads a set of Precipitation or Temperature datasets in the +% format which is used by the University of Delaware and stores the +% formatted data in a structure variable. +% The missing values are set to -9999 for consistency with similar +% datasets. + +long = -179.75:0.5:179.75; +lat = 89.75:-0.5:-89.75; + +nr_files = size(fnames,1); + +del = cell(nr_files*12, 3); + +h = waitbar(0,'','Name','Reading progress...'); + +for i = 1:nr_files + + + + d = importdata(fnames{i}); + year = str2num(fnames{i}(end-3:end)); + + tmp = cell(12,1); + + for k = 1:12 + tmp{k,1} = zeros(360,720)-9999; + tmp{k,2} = k; + tmp{k,3} = year; + end + + for j = 1:length(d) + row_ind = find(lat == d(j,2)); + clm_ind = find(long == d(j,1)); + + tmp{1,1}(row_ind, clm_ind) = d(j,3); + tmp{2,1}(row_ind, clm_ind) = d(j,4); + tmp{3,1}(row_ind, clm_ind) = d(j,5); + tmp{4,1}(row_ind, clm_ind) = d(j,6); + tmp{5,1}(row_ind, clm_ind) = d(j,7); + tmp{6,1}(row_ind, clm_ind) = d(j,8); + tmp{7,1}(row_ind, clm_ind) = d(j,9); + tmp{8,1}(row_ind, clm_ind) = d(j,10); + tmp{9,1}(row_ind, clm_ind) = d(j,11); + tmp{10,1}(row_ind, clm_ind) = d(j,12); + tmp{11,1}(row_ind, clm_ind) = d(j,13); + tmp{12,1}(row_ind, clm_ind) = d(j,14); + + + end + + del((i-1)*12+1:i*12,1) = tmp(:,2); + del((i-1)*12+1:i*12,2) = tmp(:,3); + del((i-1)*12+1:i*12,3) = tmp(:,1); + clear tmp + + waitbar(i/nr_files, h, [int2str(i) '/' int2str(nr_files) ' files']) + sprintf([fnames{i}, '...Ok']) +end +close(h) + + \ No newline at end of file diff --git a/read_ecmwf.m b/read_ecmwf.m new file mode 100644 index 0000000..d25507b --- /dev/null +++ b/read_ecmwf.m @@ -0,0 +1,26 @@ +function ecmwf = read_ecmwf(fname, varname) + + +nr_files = length(fname); + +for i = 1:nr_files + + yr = str2num(fname{i}(1:4)); + mnth = str2num(fname{i}(5:6)); + + ncid = netcdf.open(fname{i}, 'nowrite'); + dta = netcdf.getvar(ncid, 3); + + ecmwf{i,1} = 'ECMWF'; + ecmwf{i,2} = varname; + ecmwf{i,3} = 1; + ecmwf{i,4} = mnth; + ecmwf{i,5} = yr; + ecmwf{i,6} = 'Global'; + ecmwf{i,7} = 89.75:-0.5:-89.75; + ecmwf{i,8} = -179.75:0.5:179.75; + ecmwf{i,9} = double(flipud(dta')); + ecmwf{i,10} = '[mm/month]'; + +end + \ No newline at end of file diff --git a/read_fluxes.m b/read_fluxes.m new file mode 100644 index 0000000..753c15a --- /dev/null +++ b/read_fluxes.m @@ -0,0 +1,235 @@ +function UVflx = read_fluxes(dataset) + + + + +if strcmp(dataset, 'ecmwf') + fname = '/media/storage/Data/Mflux/ECMWF/original/output.grib'; + varnme{1} = 'Vertical_integral_of_eastward_water_vapour_flux'; + varnme{2} = 'Vertical_integral_of_northward_water_vapour_flux'; + + mnth = 1; + yr = 1989; + + for i = 1:263 + UVflx{i,1} = mnth; + UVflx{i,2} = yr; + UVflx{i,3} = nj_varget(fname, varnme{1}, [i 1 1], [1 inf inf]); + UVflx{i,4} = nj_varget(fname, varnme{2}, [i 1 1], [1 inf inf]); + + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + end + + lon = nj_varget(fname, 'lon'); + lon_units = nj_attget(fname, 'lon', 'units'); + lon_axis = nj_attget(fname, 'lon', '_CoordinateAxisType'); + + lat = nj_varget(fname, 'lat'); + lat_units = nj_attget(fname, 'lat', 'units'); + lat_axis = nj_attget(fname, 'lat', '_CoordinateAxisType'); + + otnme_u{1} = 'ecmwf_uflx_jan.nc'; + otnme_u{2} = 'ecmwf_uflx_jul.nc'; + otnme_v{1} = 'ecmwf_vflx_jan.nc'; + 
otnme_v{2} = 'ecmwf_vflx_jul.nc'; +elseif strcmp(dataset, 'merra') + + mnth = 1; + yr = 1979; + + path = '/media/storage/Data/Mflux/MERRA/original/'; + varnme{1} = 'uflxqv'; + varnme{2} = 'vflxqv'; + + for i = 1:372 + if mnth < 10 + txtmnth = ['0', num2str(mnth)]; + else + txtmnth = num2str(mnth); + end + + if yr < 1993 + + fnme = [path, 'MERRA100.prod.assim.tavgM_2d_int_Nx.', ... + num2str(yr), txtmnth, '.SUB.nc']; + elseif yr >= 1993 && yr < 2001 + fnme = [path, 'MERRA200.prod.assim.tavgM_2d_int_Nx.', ... + num2str(yr), txtmnth, '.SUB.nc']; + elseif yr >= 2001 + fnme = [path, 'MERRA300.prod.assim.tavgM_2d_int_Nx.', ... + num2str(yr), txtmnth, '.SUB.nc']; + end + + UVflx{i,1} = mnth; + UVflx{i,2} = yr; + UVflx{i,3} = nj_varget(fnme, varnme{1}, [1 1 1], [1 inf inf]); + UVflx{i,4} = nj_varget(fnme, varnme{2}, [1 1 1], [1 inf inf]); + + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + + + end + + lon = nj_varget(fnme, 'longitude'); + lon_units = nj_attget(fnme, 'longitude', 'units'); + lon_axis = nj_attget(fnme, 'longitude', '_CoordinateAxisType'); + + lat = nj_varget(fnme, 'latitude'); + lat_units = nj_attget(fnme, 'latitude', 'units'); + lat_axis = nj_attget(fnme, 'latitude', '_CoordinateAxisType'); + + otnme_u{1} = 'merra_uflx_jan.nc'; + otnme_u{2} = 'merra_uflx_jul.nc'; + otnme_v{1} = 'merra_vflx_jan.nc'; + otnme_v{2} = 'merra_vflx_jul.nc'; + +elseif strcmp(dataset, 'cfsr') + + mnth = 1; + yr = 1979; + + fname{1} = '/media/storage/Data/Mflux/CFSR/original/CFSR.UQ.1979-2009.nc'; + fname{2} = '/media/storage/Data/Mflux/CFSR/original/CFSR.VQ.1979-2009.nc'; + + varnme{1} = 'UQ'; + varnme{2} = 'VQ'; + + for i = 1:372 + UVflx{i,1} = mnth; + UVflx{i,2} = yr; + UVflx{i,3} = nj_varget(fname{1}, varnme{1}, [i 1 1], [1 inf inf]); + UVflx{i,4} = nj_varget(fname{2}, varnme{2}, [i 1 1], [1 inf inf]); + + mnth = mnth + 1; + + if mnth == 13 + mnth = 1; + yr = yr + 1; + end + end + + lon = nj_varget(fname{1}, 'lon'); + lon_units = nj_attget(fname{1}, 'lon', 'units'); + lon_axis = nj_attget(fname{1}, 'lon', '_CoordinateAxisType'); + + lat = nj_varget(fname{1}, 'lat'); + lat_units = nj_attget(fname{1}, 'lat', 'units'); + lat_axis = nj_attget(fname{1}, 'lat', '_CoordinateAxisType'); + + otnme_u{1} = 'cfsr_uflx_jan.nc'; + otnme_u{2} = 'cfsr_uflx_jul.nc'; + otnme_v{1} = 'cfsr_vflx_jan.nc'; + otnme_v{2} = 'cfsr_vflx_jul.nc'; +end + +mn_u = comp_spat_mean(UVflx, [1989 2006], 'monthly_1', [1 2 3], -9999, 1); +mn_v = comp_spat_mean(UVflx, [1989 2006], 'monthly_1', [1 2 4], -9999, 1); + + +% Writing the U-field of January +ncid = netcdf.create(otnme_u{1}, 'NC_WRITE'); +lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); +lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + +data_var_id = netcdf.defVar(ncid, 'uflx', 'double', [lon_dim_id lat_dim_id]); +netcdf.endDef(ncid); + +netcdf.putVar(ncid, lon_var_id, lon); +netcdf.putVar(ncid, lat_var_id, lat); +netcdf.putVar(ncid, data_var_id, mn_u{1}'); + +netcdf.reDef(ncid) +netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); +netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); +netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); +netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); +netcdf.close(ncid); + +% Writing the U-field of July +ncid = netcdf.create(otnme_u{2}, 'NC_WRITE'); +lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); +lat_dim_id = 
netcdf.defDim(ncid, 'latitude', length(lat)); + +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + +data_var_id = netcdf.defVar(ncid, 'uflx', 'double', [lon_dim_id lat_dim_id]); +netcdf.endDef(ncid); + +netcdf.putVar(ncid, lon_var_id, lon); +netcdf.putVar(ncid, lat_var_id, lat); +netcdf.putVar(ncid, data_var_id, mn_u{7}'); + +netcdf.reDef(ncid) +netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); +netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); +netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); +netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); +netcdf.close(ncid); + + +% Writing the V-field of January +ncid = netcdf.create(otnme_v{1}, 'NC_WRITE'); +lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); +lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + +data_var_id = netcdf.defVar(ncid, 'vflx', 'double', [lon_dim_id lat_dim_id]); +netcdf.endDef(ncid); + +netcdf.putVar(ncid, lon_var_id, lon); +netcdf.putVar(ncid, lat_var_id, lat); +netcdf.putVar(ncid, data_var_id, mn_v{1}'); + +netcdf.reDef(ncid) +netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); +netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); +netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); +netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); +netcdf.close(ncid); + + + +% Writing the V-field of July +ncid = netcdf.create(otnme_v{2}, 'NC_WRITE'); +lon_dim_id = netcdf.defDim(ncid, 'longitude', length(lon)); +lat_dim_id = netcdf.defDim(ncid, 'latitude', length(lat)); + +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + +data_var_id = netcdf.defVar(ncid, 'vflx', 'double', [lon_dim_id lat_dim_id]); +netcdf.endDef(ncid); + +netcdf.putVar(ncid, lon_var_id, lon); +netcdf.putVar(ncid, lat_var_id, lat); +netcdf.putVar(ncid, data_var_id, mn_v{7}'); + +netcdf.reDef(ncid) +netcdf.putAtt(ncid, lon_var_id, 'units', lon_units); +netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', lon_axis); +netcdf.putAtt(ncid, lat_var_id, 'units', lat_units); +netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', lat_axis); +netcdf.close(ncid); + + + + + + + \ No newline at end of file diff --git a/read_gleam.m b/read_gleam.m new file mode 100644 index 0000000..915f9e1 --- /dev/null +++ b/read_gleam.m @@ -0,0 +1,58 @@ +function [] = read_gleam(fnames) + +for i = 1:length(fnames) + dta = importdata(fnames{i}); + + yr = fnames{i}(15:18); + mn = fnames{i}(19:20); + dy = fnames{i}(21:22); + + intdte = str2num(yr)*10000 + str2num(mn)*100 + str2num(dy); + + lat = 89.875:-0.25:-89.875; + lon = -179.875:0.25:179.875; + + dta(dta == -99) = -9999; + + outnme = [fnames{i}(1:end-3), 'nc']; + + + ncid = netcdf.create(outnme, 'NC_WRITE'); + + time_dim_id = netcdf.defDim(ncid, 'time', 1); + lon_dim_id = netcdf.defDim(ncid, 'lon', length(lon)); + lat_dim_id = netcdf.defDim(ncid, 'lat', length(lat)); + + time_var_id = netcdf.defVar(ncid, 'time', 'double', time_dim_id); + lon_var_id = netcdf.defVar(ncid, 'lon', 'double', lon_dim_id); + lat_var_id = netcdf.defVar(ncid, 'lat', 'double', lat_dim_id); + + data_var_id = netcdf.defVar(ncid, 'EVAP', 'double', ... 
+ [lon_dim_id lat_dim_id time_dim_id]); + netcdf.endDef(ncid); + + netcdf.putVar(ncid, time_var_id, intdte); + netcdf.putVar(ncid, lon_var_id, lon); + netcdf.putVar(ncid, lat_var_id, lat); + netcdf.putVar(ncid, data_var_id, dta'); + + netcdf.reDef(ncid) + netcdf.putAtt(ncid, time_var_id, 'units', 'days in YYYYMMDD'); + netcdf.putAtt(ncid, time_var_id, '_CoordinateAxisType', 'Time'); + + netcdf.putAtt(ncid, lon_var_id, 'units', 'degrees_east'); + netcdf.putAtt(ncid, lon_var_id, 'long_name', 'Longitude'); + netcdf.putAtt(ncid, lon_var_id, '_CoordinateAxisType', 'Lon'); + + netcdf.putAtt(ncid, lat_var_id, 'units', 'degrees_north'); + netcdf.putAtt(ncid, lat_var_id, 'long_name', 'Latitude'); + netcdf.putAtt(ncid, lat_var_id, '_CoordinateAxisType', 'Lat'); + + netcdf.putAtt(ncid, data_var_id, 'units', 'mm/day'); + netcdf.putAtt(ncid, data_var_id, 'long_name', 'Evapotranspiration'); + netcdf.putAtt(ncid, data_var_id, 'missing_value', -9999); + + netcdf.close(ncid); +end + + \ No newline at end of file diff --git a/read_gpcc.m b/read_gpcc.m new file mode 100644 index 0000000..67dfcbc --- /dev/null +++ b/read_gpcc.m @@ -0,0 +1,52 @@ +function [gpcc_prec gpcc_ngaug] = read_gpcc(fnames) +% The function reads a file from GPCC and stores its values and the +% appropriate deviations in a structure variable + + +h = waitbar(0,'','Name','...% datafiles processed'); +nr_files = length(fnames); + +gpcc = cell(nr_files,4); + +for i = 1:nr_files + + d = importdata(fnames{i}, ' ', 14); + dte = fnames{i}(31:36); + mnth = str2num(dte(1:2)); + yr = str2num(dte(3:6)); +% fld = reshape(d.data(:,1), [720, 360]); +% ers = reshape(d.data(:,2), [720, 360]); + ngs = reshape(d.data(:,3), [720, 360]); + +% gpcc_prec{i,1} = 'GPCC v6.0'; +% gpcc_prec{i,2} = 'P'; +% gpcc_prec{i,3} = mnth; +% gpcc_prec{i,4} = yr; +% gpcc_prec{i,5} = 'Global'; +% gpcc_prec{i,6} = (89.75:-0.5:-89.75)'; +% gpcc_prec{i,7} = (-179.75:0.5:179.75)'; +% gpcc_prec{i,8} = fld'; +% gpcc_prec{i,9} = ers'; +% gpcc_prec{i,10} = '[mm/month]'; + + gpcc_ngaug{i,1} = 'GPCC v6.0'; + gpcc_ngaug{i,2} = 'Nr of Gauges'; + gpcc_ngaug{i,3} = mnth; + gpcc_ngaug{i,4} = yr; + gpcc_ngaug{i,5} = 'Global'; + gpcc_ngaug{i,6} = (89.75:-0.5:-89.75)'; + gpcc_ngaug{i,7} = (-179.75:0.5:179.75)'; + gpcc_ngaug{i,8} = ngs'; + gpcc_ngaug{i,9} = '[gauges/gridcell]'; + + + + waitbar(i/nr_files, h, [int2str((i*100)/nr_files) '%']) +end +gpcc_prec = 1; + +close(h) + + + + diff --git a/read_gpcp.m b/read_gpcp.m new file mode 100644 index 0000000..8003563 --- /dev/null +++ b/read_gpcp.m @@ -0,0 +1,46 @@ +function gpcp = read_gpcp(fnames) +% The function reads a file from GPCC and stores its values and the +% appropriate deviations in a structure variable + + +h = waitbar(0,'','Name','...% datafiles processed'); +nr_files = length(fnames); + +gpcp = []; + +for i = 1:nr_files + ncid = netcdf.open(fnames{i}, 'nowrite'); + year = str2num(fnames{i}(6:9)); + + sig = netcdf.getvar(ncid, 3); + err = netcdf.getvar(ncid, 4); + + for j = 1:size(sig,3) + + sig_tmp = double(flipud(sig(:,:,j)')); + err_tmp = double(flipud(err(:,:,j)')); + + tmp{j,1} = 'GPCPv2.1'; + tmp{j,2} = 'PREC'; + tmp{j,3} = 1; + tmp{j,4} = j; + tmp{j,5} = year; + tmp{j,6} = 'Global'; + tmp{j,7} = 89.75:-0.5:-89.75; + tmp{j,8} = -179.75:0.5:179.75; + tmp{j,9} = sig_tmp; + tmp{j,10} = err_tmp; + tmp{j,11} = '[mm/month]'; + + end + + gpcp = [gpcp; tmp]; + netcdf.close(ncid) + clear tmp + waitbar(i/nr_files, h, [int2str((i*100)/nr_files) '%']) +end +close(h) + + + + diff --git a/read_ism.m b/read_ism.m new file mode 100644 index 
0000000..2768a7b --- /dev/null +++ b/read_ism.m @@ -0,0 +1,401 @@ +function [dta, qf] = read_ism(dtadir, gauge, sensor, subset, period, pltflg, tscale) + + +% Format of ISMN-data +frmt = '%4u %*1c %2u %*1c %2u %3u %*3c %9f %*2c'; + +if strcmp(tscale, 'monthly') + if strcmp(subset, 'scan') + % Construct the filenames + fnme{1} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.050800_0.050800_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{2} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.101600_0.101600_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{3} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.203200_0.203200_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{4} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.508000_0.508000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{5} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_1.016000_1.016000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + + % Get some data... + for i = 1:5 + fprintf('Reading file %s \n', fnme{i}) + [yr, mnth, day, hr, fld(:, i)] = textread(fnme{i}, frmt, -1, 'headerlines', 1); + end + + intym = yr*100 + mnth; + intym_unique = unique(intym); + + for i = 1:length(intym_unique) + + indx = find(intym == intym_unique(i)); + dta(i,1) = rem(intym_unique(i), 100); + dta(i,2) = floor(intym_unique(i)/100); + dta(i,3) = datenum(floor(intym_unique(i)/100), rem(intym_unique(i), 100), 15); + + qf(i,1) = rem(intym_unique(i), 100); + qf(i,2) = floor(intym_unique(i)/100); + qf(i,3) = datenum(floor(intym_unique(i)/100), rem(intym_unique(i), 100), 15); + + dta(i,4:8) = nanmean(fld(indx, :)); + + + length_nan(1,1) = sum(isnan(fld(indx, 1))); + length_nan(1,2) = sum(isnan(fld(indx, 2))); + length_nan(1,3) = sum(isnan(fld(indx, 3))); + length_nan(1,4) = sum(isnan(fld(indx, 4))); + length_nan(1,5) = sum(isnan(fld(indx, 5))); + qf(i,4:8) = (length(indx) - length_nan)./length(indx); + + for j = 1:5 + if qf(i, j+3) < 0.8 + fprintf('Less than 80 percent of data for TS %u and depth %u \n', intym_unique(i), j) + end + end + end + + if pltflg + + clr = [ 60 60 60; + 050 136 189; + 244 109 67; + 026 152 080; + 240 130 40; + 0 200 200; + 230 220 50; + 160 0 200; + 160 230 50; + 0 160 255; + 240 0 130; + 230 175 45; + 0 210 140; + 130 0 220]/255; + + + figure + tlt = ['Soil moisture vs. 
data availability at ', gauge]; + title(tlt, 'fontsize', 12); hold on; + + plot(dta(:,3), dta(:,4), 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,6), 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,7), 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,8), 'Color', clr(5,:), 'Linewidth', 1.5); + + plot(dta(:,3), qf(:,4), '--', 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,3), qf(:,5), '--', 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,3), qf(:,6), '--', 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,3), qf(:,7), '--', 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,3), qf(:,8), '--', 'Color', clr(5,:), 'Linewidth', 1.5); + + ylim([0 1]); + datetick('x', 'yyyy') + pbaspect([3 1 1]) + legend('5cm', '10cm', '20cm', '50cm', '100cm'); + + end + + elseif strcmp(subset, 'icn') + if strcmp(gauge, 'OrrCenter(Perry)') + fnme{1} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.000000_0.100000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{2} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.100000_0.300000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{3} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.300000_0.500000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{4} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.500000_0.700000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{5} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.700000_0.900000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{6} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.900000_1.100000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + + depth = [10, 20, 20, 20, 20, 20]; + + else + fnme{1} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.100000_0.300000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{2} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.300000_0.500000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{3} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.500000_0.700000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{4} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.700000_0.900000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{5} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.900000_1.100000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{6} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.100000_1.300000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{7} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.300000_1.500000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{8} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.500000_1.700000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{9} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.700000_1.900000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{10} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.900000_2.000000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + + depth = [20, 20, 20, 20, 20, 20, 20, 20, 20, 10]; + end + + % Get some data... 
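+        % Each ISMN .stm file holds a single soil layer; textread returns
+        % parallel year/month/day/hour vectors plus the soil moisture values,
+        % collected column-wise in fld (one column per file). The individual
+        % records are then averaged to monthly means via the yyyymm key intym.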
+ for i = 1:length(fnme) + fprintf('Reading file %s \n', fnme{i}) + [yr, mnth, day, hr, fld(:, i)] = textread(fnme{i}, frmt, -1, 'headerlines', 1); + end + + intym = yr*100 + mnth; + intym_unique = unique(intym); + + for i = 1:length(intym_unique) + indx = find(intym == intym_unique(i)); + dta(i,1) = rem(intym_unique(i), 100); + dta(i,2) = floor(intym_unique(i)/100); + dta(i,3) = datenum(floor(intym_unique(i)/100), rem(intym_unique(i), 100), 15); + + dta(i,4:4+length(fnme)-1) = nanmean(fld(indx, :)); + end + + if pltflg + + clr = [ 60 60 60; + 050 136 189; + 244 109 67; + 026 152 080; + 240 130 40; + 0 200 200; + 230 220 50; + 160 0 200; + 160 230 50; + 0 160 255; + 240 0 130; + 230 175 45; + 0 210 140; + 130 0 220]/255; + figure + tlt = ['Soil moisture at ', gauge]; + title(tlt, 'fontsize', 12); hold on; + + if strcmp(gauge, 'OrrCenter(Perry)') + plot(dta(:,3), dta(:,4), 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,6), 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,7), 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,8), 'Color', clr(5,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(6,:), 'Linewidth', 1.5); + + ylim([0 1]); + datetick('x', 'yyyy') + pbaspect([3 1 1]) + legend('0-10cm', '10-30cm', '30-50cm', '50-70cm', '70-90cm', '90-110cm'); + else + + plot(dta(:,3), dta(:,4), 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,6), 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,7), 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,8), 'Color', clr(5,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,9), 'Color', clr(6,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,10), 'Color', clr(7,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,11), 'Color', clr(8,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,12), 'Color', clr(9,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,13), 'Color', clr(10,:), 'Linewidth', 1.5); + + ylim([0 1]); + datetick('x', 'yyyy') + pbaspect([3 1 1]) + legend('10-30cm', '30-50cm', '50-70cm', '70-90cm', '90-110cm', '110-130cm', '130-150cm', '150-170cm', '170-190cm', '190-200cm'); + + + + + end + end + qf = 1; + end + +elseif strcmp(tscale, 'daily') + if strcmp(subset, 'scan') + % Construct the filenames + fnme{1} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.050800_0.050800_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{2} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.101600_0.101600_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{3} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.203200_0.203200_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{4} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_0.508000_0.508000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{5} = [dtadir, gauge, '/SCAN_SCAN_', gauge, '_sm_1.016000_1.016000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + + % Get some data... 
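+        % The five SCAN files (nominal depths of 5, 10, 20, 50 and 100 cm) are
+        % read one by one; the records are then grouped by calendar day via the
+        % yyyymmdd key intymd and averaged with nanmean, while qf stores the
+        % fraction of valid (non-NaN) samples per day and depth.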
+ for i = 1:5 + fprintf('Reading file %s \n', fnme{i}) + [yr, mnth, day, hr, fld(:, i)] = textread(fnme{i}, frmt, -1, 'headerlines', 1); + end + + intymd = yr*10000 + mnth*100 + day; + intymd_unique = unique(intymd); + + for i = 1:length(intymd_unique) + + indx = find(intymd == intymd_unique(i)); + + tmp_dte = num2str(intymd_unique(i)); + dta(i,1) = str2num(tmp_dte(7:8)); + dta(i,2) = str2num(tmp_dte(5:6)); + dta(i,3) = str2num(tmp_dte(1:4)); + dta(i,4) = datenum(dta(i,3), dta(i,2), dta(i,1)); + + qf(i, 1:4) = dta(i, 1:4); + + + dta(i,5:9) = nanmean(fld(indx, :)); + + + length_nan(1,1) = sum(isnan(fld(indx, 1))); + length_nan(1,2) = sum(isnan(fld(indx, 2))); + length_nan(1,3) = sum(isnan(fld(indx, 3))); + length_nan(1,4) = sum(isnan(fld(indx, 4))); + length_nan(1,5) = sum(isnan(fld(indx, 5))); + qf(i,5:9) = (length(indx) - length_nan)./length(indx); + + for j = 1:5 + if qf(i, j+4) < 0.8 + fprintf('Less than 80 percent of data for TS %u and depth %u \n', intymd_unique(i), j) + end + end + end + + if pltflg + + clr = [ 60 60 60; + 050 136 189; + 244 109 67; + 026 152 080; + 240 130 40; + 0 200 200; + 230 220 50; + 160 0 200; + 160 230 50; + 0 160 255; + 240 0 130; + 230 175 45; + 0 210 140; + 130 0 220]/255; + + + figure + tlt = ['Soil moisture vs. data availability at ', gauge]; + title(tlt, 'fontsize', 12); hold on; + + plot(dta(:,4), dta(:,5), 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,4), dta(:,6), 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,4), dta(:,7), 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,4), dta(:,8), 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,4), dta(:,9), 'Color', clr(5,:), 'Linewidth', 1.5); + + plot(dta(:,4), qf(:,5), '--', 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,4), qf(:,6), '--', 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,4), qf(:,7), '--', 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,4), qf(:,8), '--', 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,4), qf(:,9), '--', 'Color', clr(5,:), 'Linewidth', 1.5); + + ylim([0 1]); + datetick('x', 'yyyy') + pbaspect([3 1 1]) + legend('5cm', '10cm', '20cm', '50cm', '100cm'); + + end + elseif strcmp(subset, 'icn') + if strcmp(gauge, 'OrrCenter(Perry)') + fnme{1} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.000000_0.100000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{2} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.100000_0.300000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{3} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.300000_0.500000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{4} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.500000_0.700000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{5} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.700000_0.900000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{6} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.900000_1.100000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + + depth = [10, 20, 20, 20, 20, 20]; + + else + fnme{1} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.100000_0.300000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{2} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.300000_0.500000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{3} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.500000_0.700000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{4} = [dtadir, gauge, 
'/ICN_ICN_', gauge, '_sm_0.700000_0.900000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{5} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_0.900000_1.100000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{6} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.100000_1.300000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{7} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.300000_1.500000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{8} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.500000_1.700000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{9} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.700000_1.900000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + fnme{10} = [dtadir, gauge, '/ICN_ICN_', gauge, '_sm_1.900000_2.000000_', sensor, '_', num2str(period(1)), '_', num2str(period(2)), '.stm']; + + depth = [20, 20, 20, 20, 20, 20, 20, 20, 20, 10]; + end + + % Get some data... + for i = 1:length(fnme) + fprintf('Reading file %s \n', fnme{i}) + [yr, mnth, day, hr, fld(:, i)] = textread(fnme{i}, frmt, -1, 'headerlines', 1); + end + + intymd = yr*10000 + mnth*100 + day; + intymd_unique = unique(intymd); + keyboard + for i = 1:length(intymd_unique) + + indx = find(intymd == intymd_unique(i)); + + tmp_dte = num2str(intymd_unique(i)); + dta(i,1) = str2num(tmp_dte(7:8)); + dta(i,2) = str2num(tmp_dte(5:6)); + dta(i,3) = str2num(tmp_dte(1:4)); + dta(i,4) = datenum(dta(i,3), dta(i,2), dta(i,1)); + + dta(i,5:4+length(fnme)-1) = nanmean(fld(indx, :)); + + + end + + if pltflg + + clr = [ 60 60 60; + 050 136 189; + 244 109 67; + 026 152 080; + 240 130 40; + 0 200 200; + 230 220 50; + 160 0 200; + 160 230 50; + 0 160 255; + 240 0 130; + 230 175 45; + 0 210 140; + 130 0 220]/255; + figure + tlt = ['Soil moisture at ', gauge]; + title(tlt, 'fontsize', 12); hold on; + + if strcmp(gauge, 'OrrCenter(Perry)') + plot(dta(:,3), dta(:,4), 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,6), 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,7), 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,8), 'Color', clr(5,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(6,:), 'Linewidth', 1.5); + + ylim([0 1]); + datetick('x', 'yyyy') + pbaspect([3 1 1]) + legend('0-10cm', '10-30cm', '30-50cm', '50-70cm', '70-90cm', '90-110cm'); + else + + plot(dta(:,3), dta(:,4), 'Color', clr(1,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,5), 'Color', clr(2,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,6), 'Color', clr(3,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,7), 'Color', clr(4,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,8), 'Color', clr(5,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,9), 'Color', clr(6,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,10), 'Color', clr(7,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,11), 'Color', clr(8,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,12), 'Color', clr(9,:), 'Linewidth', 1.5); + plot(dta(:,3), dta(:,13), 'Color', clr(10,:), 'Linewidth', 1.5); + + ylim([0 1]); + datetick('x', 'yyyy') + pbaspect([3 1 1]) + legend('10-30cm', '30-50cm', '50-70cm', '70-90cm', '90-110cm', '110-130cm', '130-150cm', '150-170cm', '170-190cm', '190-200cm'); + + + + + end + end + qf = 1; + end + + + + end +end + + + + + + diff --git a/read_merra.m b/read_merra.m new file mode 100644 index 0000000..be75bc4 --- /dev/null +++ b/read_merra.m @@ -0,0 +1,66 
@@ +function merra = read_merra(fnames, variable) + + +nr_files = size(fnames,1); + +if strcmp(variable, 'PREC') + nc_var = 'prectot' + unit = '[mm/month]'; +elseif strcmp(variable, 'T2') + nc_var = 't2m'; + unit = '[°C]'; +elseif strcmp(variable, 'TS') + nc_var = 'ts'; + unit = '[°K]'; +elseif strcmp(variable, 'TQV') + nc_var = 'tqv'; + unit = '[mm]'; +elseif strcmp(variable, 'TQI') + nc_var = 'tqi'; + unit = '[mm]'; +elseif strcmp(variable, 'TQL') + nc_var = 'tql'; + unit = '[mm]'; +end + + +merra = cell(nr_files,10); + +h = waitbar(0,'','Name','Reading progress...'); + +for i = 1:nr_files + ncid = netcdf.open(fnames{i}, 'nowrite'); + year = fnames{i}(end-12:end-9); + month = fnames{i}(end-8:end-7); +% keyboard + nr_days = daysinmonth(str2num(month), str2num(year)); + + varid = netcdf.inqVarID(ncid, nc_var); + tmp = netcdf.getvar(ncid, varid); + netcdf.close(ncid) + + tmp = double(tmp); + tmp = flipud(tmp'); + + if strcmp(variable, 'PREC') + tmp = tmp * 3600 * 24 * nr_days; + end + +% tmp = resizem(tmp, [360, 720], 'bilinear'); + + merra{i,1} = 'MERRA'; + merra{i,2} = variable; + merra{i,3} = 1; + merra{i,4} = str2num(month); + merra{i,5} = str2num(year); + merra{i,6} = 'Global'; +% merra{i,7} = 89.75:-0.5:-89.75; +% merra{i,8} = -179.75:0.5:179.75; + merra{i,9} = tmp; +% merra{i,10} = unit; + + waitbar(i/nr_files, h, [int2str(i) '/' int2str(nr_files) ' files']) + sprintf([fnames{i}, '...Ok']) +end + +close(h) \ No newline at end of file diff --git a/read_netcdf.m b/read_netcdf.m new file mode 100644 index 0000000..05b169a --- /dev/null +++ b/read_netcdf.m @@ -0,0 +1,20 @@ +function file_vars = read_netcdf(fname) + +ncid = netcdf.open(fname, 'nowrite') + +% Read the number of variables and global attributes +[ndims,nvars,ngatts,unlimdimid] = netcdf.inq(ncid); + + +% Read the name, etc. of the different variables +for i = 0:nvars-1 + [file_vars{i+1,1}, xtype, dimids, numatts] = netcdf.inqVar(ncid,i); + file_vars{i+1,2} = netcdf.getVar(ncid,i); + + for j = 0:numatts-1 + file_vars{i+1,3}{j+1,1} = netcdf.inqAttName(ncid,i,j); + file_vars{i+1,3}{j+1,2} = netcdf.getAtt(ncid,i, ... + file_vars{i+1,3}{j+1,1}); + end +end + \ No newline at end of file diff --git a/read_ssmis_averaged_v7.m b/read_ssmis_averaged_v7.m new file mode 100644 index 0000000..0dea98d --- /dev/null +++ b/read_ssmis_averaged_v7.m @@ -0,0 +1,62 @@ +% read_ssmis_averaged_v7 +% +% this subroutine reads uncompressed SSMIS data from Remote Sensing Systems' +% binary format (version-7 released October 2010) +% +% The averaged files can be 3-Day, weekly, or monthly time composites (averages). +% These averaged time composite files all share the same data file format. +% File name format is Fss_yyyymmddv7_d3d for 3-day (average of 3 days ending on file date) +% Fss_yyyymmddv7 for weekly (start sunday, end saturday, named by saturday date) +% Fss_yyyymmv7 for monthly +% +% input arguments: +% data_file = the full path and name of the uncompressed data file +% +% the function returns these products: +% wind = wind speed in meters/second +% vapor = atmospheric water vapor in millimeters +% cloud = liquid cloud water in millimeters +% rain = rain rate in millimeters/hour +% +% longitude is 0.25*xdim- 0.125 +% latitude is 0.25*ydim-90.125 +% +% +% For detailed data description, see +% http://www.remss.com/ssmi/ssmis_data_description.html +% +% Remote Sensing Systems +% support@remss.com + + +function [wind,vapor,cloud,rain] = read_ssmis_averaged_v7(data_file) + +scale=[.2 .3 .01 .1]; +offset=[0.,0.,0.05,0.] 
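+% The four byte maps are stored in the order wind, vapor, cloud, rain;
+% byte counts <= 250 are valid data and are converted below as count*scale - offset,
+% while larger counts are RSS flag values (land, sea ice, missing data) and are
+% left unchanged.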
+xdim=1440;ydim=720;numvar=4; +mapsiz=xdim*ydim; + +if ~exist(data_file) + error = ['file not found: ', data_file] + wind=[];vapor=[];cloud=[];rain=[]; + return; +end; + +fid=fopen(data_file,'rb'); +data=fread(fid,mapsiz*numvar,'uint8'); +fclose(fid); +data_file +map=reshape(data,[xdim ydim numvar]); + +for i=1:numvar + tmp=map(:,:,i); + ia=find(tmp<=250);tmp(ia)=tmp(ia)*scale(i)-offset(i); + map(:,:,i)=tmp; +end; + +wind = map(:,:,1); +vapor = map(:,:,2); +cloud = map(:,:,3); +rain = map(:,:,4); + +return; diff --git a/read_tmi_averaged_v4.m b/read_tmi_averaged_v4.m new file mode 100644 index 0000000..aabab0d --- /dev/null +++ b/read_tmi_averaged_v4.m @@ -0,0 +1,62 @@ + +% this subroutine reads RSS TMI averaged byte maps (Version-4 released September 2006). +% The averaged files include: 3-Day, weekly, and monthly time composites. +% The averaged time composite files all share the same data format. +% +% File name format is tmi_yyyymmddv4_d3d for 3-day (average of 3 days ending on file date) +% tmi_yyyymmddv4 for weekly (start sunday, end saturday, named by saturday date) +% tmi_yyyymmv4 for monthly +% +% input arguments: +% data_file = the full path and name to the uncompressed data file +% +% the function returns these products: +% sst = sea surface temperature in deg C +% wind11 = wind speed derived using 11 GHz channel in meters/second +% wind37 = wind speed derived using 37 GHz channel in meters/second +% vapor = atmospheric water vapor in millimeters +% cloud = liquid cloud water in millimeters +% rain = rain rate in millimeters/hour +% +% longitude is 0.25*xdim- 0.125 +% latitude is 0.25*ydim-40.125 +% +% For detailed data description, see +% http://www.remss.com/tmi/tmi_description.html +% Remote Sensing Systems +% support@remss.com + + +function [sst,wind11,wind37,vapor,cloud,rain]=read_tmi_averaged_v4(data_file) + +scale=[.15 .2 .2 .3 .01 .1]; +offset=[-3 0 0 0 0 0]; +xdim=1440;ydim=320;numvar=6; +mapsiz=xdim*ydim; + +if ~exist(data_file) + error = ['file not found: ', data_file] + sst=[];wind11=[];wind37=[];vapor=[];cloud=[];rain=[]; + return; +end; + +fid=fopen(data_file,'rb'); +data=fread(fid,mapsiz*numvar,'uint8'); +fclose(fid); +data_file +map=reshape(data,[xdim ydim numvar]); + +for i=1:numvar + tmp=map(:,:,i); + ia=find(tmp<=250);tmp(ia)=tmp(ia)*scale(i)+offset(i); + map(:,:,i)=tmp; +end; + +sst = map(:,:,1); +wind11 = map(:,:,2); +wind37 = map(:,:,3); +vapor = map(:,:,4); +cloud = map(:,:,5); +rain = map(:,:,6); + +return; \ No newline at end of file diff --git a/read_usgs.m b/read_usgs.m new file mode 100644 index 0000000..00b615c --- /dev/null +++ b/read_usgs.m @@ -0,0 +1,12 @@ +function usgs = read_usgs(fname) + +d = importdata(fname, '\t', 37); +data = d.data; + +mnths = data(:, 5); +yrs = data(:, 4); +numdte = datenum(yrs, mnths, 15); + +usgs = [0 0 0 data(1,1); mnths, yrs, numdte, data(:, 6)]; + + diff --git a/read_wrf_prec.m b/read_wrf_prec.m new file mode 100644 index 0000000..db9404f --- /dev/null +++ b/read_wrf_prec.m @@ -0,0 +1,25 @@ +function rain = read_wrf_prec(fnames, rtpe, sm, sy) + + +for i = 1:length(fnames)-1 + tmp1 = nj_varget(fnames{i}, rtpe, [1, 1, 1], [1 inf inf]); + tmp2 = nj_varget(fnames{i+1}, rtpe, [1, 1, 1], [1 inf inf]); + rain{i,1} = sm; + rain{i,2} = sy; + rain{i,3} = tmp2 - tmp1; + + sm = sm + 1; + if sm == 13 + sm = 1; + sy = sy + 1; + end + + +end + +tmp1 = nj_varget(fnames{i+1}, rtpe, [1, 1, 1], [1 inf inf]); +tmp2 = nj_varget(fnames{i+1}, rtpe, [inf, 1, 1], [1 inf inf]); + +rain{i+1,1} = sm + 1; +rain{i+1,2} = sy + 1; +rain{i+1,3} = 
tmp2 - tmp1; diff --git a/recon_eeofs.m b/recon_eeofs.m new file mode 100644 index 0000000..09f9680 --- /dev/null +++ b/recon_eeofs.m @@ -0,0 +1,24 @@ +function F_recon = recon_eeofs(eeofs, pcs, lags, recon_mode) + +nts = size(pcs, 1) + max(lags); +nr_lags = length(lags); +npts = size(eeofs, 1)/nr_lags; + +% keyboard +pcs_md = [zeros(max(lags), recon_mode); pcs(:, 1:recon_mode); zeros(max(lags), recon_mode)]; +eeofs_md = zeros(nr_lags*recon_mode, npts); +bigmat = zeros(nts, nr_lags*recon_mode); + +for i = 1:nr_lags + bigmat(:, (i-1)*recon_mode+1:i*recon_mode) = ... + pcs_md(max(lags)+1-lags(i):size(pcs_md, 1)-lags(i), 1:recon_mode); + eeofs_md((i-1)*recon_mode+1:i*recon_mode, :) = eeofs((i-1)*npts+1:i*npts, 1:recon_mode)'; +end + +div = zeros(size(bigmat)); +div(bigmat ~= 0) = 1; +div = 1./(sum(div, 2)/recon_mode); +F_recon = repmat(div, 1, npts).*(bigmat*eeofs_md); + + + \ No newline at end of file diff --git a/reconeof.m b/reconeof.m new file mode 100644 index 0000000..b8a4c93 --- /dev/null +++ b/reconeof.m @@ -0,0 +1,24 @@ +function [eof_s, data_s] = reconeof(eofs, pcs, c_indx, mxmde, fsze) + + + +for i = 1:mxmde + fld = zeros(fsze(1)*fsze(2),1); + fld(c_indx) = eofs(:,i); + + eof_s{1,i} = reshape(fld, fsze(1), fsze(2)); + + tmp = eofs*pcs(i,:)'; + + fld = zeros(fsze(1)*fsze(2),1); + fld(c_indx) = tmp; + data_s{1,i} = reshape(fld, fsze(1), fsze(2)); + + + +end + + + + + diff --git a/reg_flt.m b/reg_flt.m new file mode 100644 index 0000000..36dcc8b --- /dev/null +++ b/reg_flt.m @@ -0,0 +1,269 @@ +function [klm_f] = reg_flt(klm, P, K, cmpstrct, sig, ccrit, l01rem, pthflnme); + +% The function filters a set of colombo ordererd sh-coefficients klm +% according to the regularization approach with their inverse covariance +% matrix P and a constraining function K. K can be given as two parameters +% of a power-law type signal covariance, as a c\s, s|c, row- or column +% vector or as a full (lmax + 1)^2 x (lmax + 1)^2 matrix. +% The input data klm and P can be provided through a path and filename +% where the .mat-file are stored. +% +% The weight factors between the covariance matrix and the signal +% covariance matrix can be provided through the input parameter sig in a +% column-vector. If this vector is empty, the factors are computed by the +% function itself according to a variance component estimation, which is +% iterated until the convergence criterion ccrit is reached. +% +% The filtered coefficients and their covariance matrix as well as the +% optionally estimated variance components can be stored in an arbitrary +% file, defined through the structure-variable pthflnme. Otherwise, the +% results are copied to the working space. 
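+%
+% Example (sketch only; the power-law parameters below are placeholders, not
+% values from a real GRACE solution):
+%     klm_f = reg_flt(klm, P, [1e-10 -4], 'block', [], 1e-8, 1, []);
+% filters klm with a block-diagonal covariance structure and a power-law
+% signal model, estimates the variance components by VCE and copies Qx, F
+% and sig to the base workspace.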
+% +%-------------------------------------------------------------------------- +% Input: klm unfiltered sh-coefficients in c\s-, s|c-, or +% colombo ordered row- or column-vector format +% klm can be a structure variable with the elements +% klm.path -> path where the input data is stored +% klm.fname -> filename (without .mat extension) of +% the input data +% +% P inverse colombo ordered covariance matrix of the +% input coefficients klm +% P can be a structure variable with the elements +% P.path -> path where the covariance matrix is +% stored +% P.fname -> filename (without .mat extension) of +% the input covariance matrix +% +% K Vector/matrix which contains either the two +% polynomial coefficients of a fitting power law or +% the coefficients of an arbitrary signal covariance +% model in c\s-, s|c-, or colombo ordered row- or +% column-vector format or in a full +% (lmax + 1)^2 x (lmax + 1)^2 matrix. +% +% cmpstrct String-variable which defines the treatment of the +% covariance structure: +% - 'full' -> P is assumed as a full covariance +% matrix, all correlations between degrees and +% orders are considered (might take long time for +% computation) +% - 'block' -> P is considered as a block-diagonal +% matrix, only correlations between coefficients +% of the same order are considered (default) +% - 'diag' -> P is considered as a diagonal +% matrix, no correlations are considered +% +% sig Variance components of the inverse covariance +% matrix P sig(1,1) and the signal covariance +% K sig(1,2). If sig is an empty array, the variance +% components are computed through a variance +% component estimation by the function itself +% (default: sig = []) +% +% ccrit The parameter ccrit is the convergence criterion +% of the variance component estimation, if no +% variance components are provided +% (default: ccrit = 10^-8) +% +% l01rem If GRACE data is used, it is usual to remove the +% degree 0,1 coefficients. 
If one wants to keep +% these coefficients, l01rem has to be set to 0 +% (default: l01rem = 1) +% +% pthflnme structure variable which defines the path and +% filename place where the output data klm_f, Qx, K +% and sig should be stored: +% pthflnme.path -> path +% pthflnme.fname -> filename +% if pthflnme is an empty array, the data will be +% copiped to the working space +% +% +% Output: klm_f filtered sh-coefficients which are arranged in the +% same format as the input coefficients +% +% Qx covariance matrix of the filtered coefficients +% +% F matrix with filter coefficients +% +% sig variance components of the covariance matrix +% (sig(1,1)) and the signal covariance (sig(1,2)) +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 10.01.2010 +%-------------------------------------------------------------------------- +% Uses: mod01.m, P_ord2.m, vce.m, chkfrmt.m, plwmtrx.m +%-------------------------------------------------------------------------- + +% Checking the input arguments and setting the default parameters +if nargin < 8 + pthflnme = []; +end + +if nargin < 7 + l01rem = 1; +end + +if nargin < 6 + ccrit = 10^-8; +end + +if nargin < 5 + sig = []; +end + +if nargin < 4 + cmpstrct = 'block'; +end + +% Loading the input data, if a filename is provided +if isstruct(klm) + fname = [klm.path, '/', klm.fname, '.mat']; + clear klm + klm = importdata(fname); + clear fname +end + +if isstruct(P) + fname = [P.path, '/', P.fname, '.mat']; + clear P + P = importdata(fname); + clear fname +end + +% Size of the input data +[rp, cp] = size(P); +[rk, ck] = size(K); +[r, c] = size(klm); + +% Checking the input format of the sh-coefficients +[klmfrmt, lmax, klm] = chkfrmt(klm, 'rvec'); + +% Checking the format of the covariance matrix P, +if strcmp(cmpstrct, 'diag') + [cvrfrmt, lmx, P] = chkfrmt(P, 'rvec'); + clear lmx +end +% Computing the signal covariance +if size(K) == [1,2] | size(K) == [2,1] + + K = plwmtrx(K(1), K(2), lmax, 'vec', 'rms_1'); +else + [tmp1 tmp2 K] = chkfrmt(K, 'rvec'); + clear tmp* +end + +if strcmp(cmpstrct, 'full') + K = diag(K); +end + +% Removing the degree 0,1 coefficients from the datasets +if l01rem == 1 + klm = mod01(klm, 'rem'); + P = mod01(P, 'rem'); + K = mod01(K, 'rem'); +end + +% If no variance components are provided, the function computes the +% appropriate weight factors itself +if isempty(sig) + clear sig + fprintf('Performing VCE... \n \n') + [vcmpts, sig] = vce(klm, P, K, ccrit, cmpstrct, lmax); + fprintf('Done! \n \n') +end + +fprintf('Filtering the input coefficients.... 
') + +if strcmp(cmpstrct, 'block') + + P_m = P_ord2(P, -1, 'cell'); + K_m = P_ord2(diag(K), -1, 'cell'); + klm_m = P_ord2(diag(klm), -1, 'cell'); + + Qx{1,1} = inv(1/sig(1)*P_m{1,1} + 1/sig(2)*K_m{1,1}); + Qx{1,2} = zeros(size(Qx{1,1})); + + F{1,1} = Qx{1,1}*1/sig(1)*P_m{1,1}; + F{1,2} = zeros(size(F{1,1})); + + klm_f{1,1} = diag(F{1,1}*diag(klm_m{1,1})); + klm_f{1,2} = diag(zeros(size(klm_f{1,1}))); + + + for m = 1:lmax + Qx{m+1,1} = inv(1/sig(1)*P_m{m+1,1} + 1/sig(2)*K_m{m+1,1}); + Qx{m+1,2} = inv(1/sig(1)*P_m{m+1,2} + 1/sig(2)*K_m{m+1,2}); + + F{m+1,1} = Qx{m+1,1}*1/sig(1)*P_m{m+1,1}; + F{m+1,2} = Qx{m+1,2}*1/sig(1)*P_m{m+1,2}; + + klm_f{m+1,1} = diag(F{m+1,1}*diag(klm_m{m+1,1})); + klm_f{m+1,2} = diag(F{m+1,2}*diag(klm_m{m+1,2})); + end + + Qx = blkdiag(Qx{:,1}, Qx{2:end,2}); + F = blkdiag(F{:,1}, F{2:end,2}); + klm_f = diag(blkdiag(klm_f{:,1}, klm_f{2:end,2})); + +elseif strcmp(cmpstrct, 'diag') + + Qx = 1./(1/sig(1)*P + 1/sig(2)*K); + F = Qx*1/sig(1).*P; + klm_f = F.*klm; + +elseif strcmp(cmpstrct, 'full') + + Qx = inv(1/sig(1)*P + 1/sig(2)*K); + F = Qx*1/sig(1)*P; + klm_f = F*klm; + +end + +% If degree 0,1 coefficients were removed, they will be added here again as +% zeros +if l01rem == 1 + klm_f = mod01(klm_f, 'add'); + Qx = mod01(Qx, 'add'); + F = mod01(F, 'add'); +end + +% If only variances were considered, the resulting errors and filter +% coefficients are arranged in the same format as the sh-coefficients +if strcmp(cmpstrct, 'diag') + [tmp1, tmp2, Qx] = chkfrmt(Qx, klmfrmt); + [tmp1, tmp2, F] = chkfrmt(F, klmfrmt); +end + +% Rearranging the filtered sh-coefficients in the input format +if strcmp(klmfrmt, 'rvec') == 0 + [frmt lmx klm_f] = chkfrmt(klm_f, klmfrmt); +end + +fprintf('Done! \n') + +% If no specific output path and filename were provided, the output data is +% copied to the workspace for further computations +if isempty(pthflnme) + assignin('base', 'Qx', Qx); + assignin('base', 'F', F); + assignin('base', 'sig', sig); +else + fnamef = [pthflnme.path, '/', pthflnme.fname]; + save(fnamef, 'klm_f', 'Qx', 'F'); +end + + + + + + + + + + + + + diff --git a/reg_stats.m b/reg_stats.m new file mode 100644 index 0000000..0a9f1a2 --- /dev/null +++ b/reg_stats.m @@ -0,0 +1,32 @@ +function [tot_ref, tot_obs] = reg_stats(ref_field, mval, theta, lambda, varargin) + +A = area_wghts(theta, 0.5) +A = A*ones(1,length(lambda)); + + +for i = 1:length(ref_field) + % This step ensures that both the ref_field and obs_field are converted + % to vectors of the same length + tmp_ref = ref_field{i}(ref_field{i}~=mval); + tmp_A = A(ref_field{i}~=mval); + A_tot = sum(tmp_A); + wt_ref = tmp_A.*tmp_ref/A_tot; + tot_ref(i,1) = sum(wt_ref); + for k = 1:length(varargin) + tmp_obs(:,k) = varargin{k}{i}(ref_field{i}~=mval); + wt_obs = tmp_A.*tmp_obs(:,k)/A_tot; + tot_obs(i,k) = sum(wt_obs); + end + +% keyboard + + +% keyboard + + + + + +end + + diff --git a/regrid.m b/regrid.m new file mode 100644 index 0000000..74f7c63 --- /dev/null +++ b/regrid.m @@ -0,0 +1,71 @@ +function [nudat,xg,yg] = regrid(olddat,oldlat,oldz,nulat,nuz,del,nrng) +% newdat = regrid(olddat, oldx, oldy, newx, newy, del, nrng) +% +% this function regrids olddat with corresponding x and y (ordinate and +% abcissa, respectively) onto a new grid newx and newy. This function uses +% the zgrid program which resides in this same directory. Try 'help zgrid' +% with this directory on your path to learn more about the many features of +% zgrid (not to be confused with matlab's own 'zgrid'). 
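+%
+% The interpolation itself is delegated to zgrid.m; the local subfunction
+% blank (at the end of this file) builds a polygon around the valid (non-NaN)
+% samples and passes it to zgrid as the trailing xb, yb arguments, which
+% zgrid uses to blank grid points outside the sampled region instead of
+% extrapolating into them.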
+% Values for del and nrng +% del : d << 1 to interpolate mainly in x +% : suggest .1 for adcp data and 10 for ctd data +% nrng : interpolate/extrapolate no more than +% : nrng grid points away from a data point +% : suggest 3 for adcp data +% +% example from adcpsect output. Note sizes: +% data (NO x NA) +% abcissa (1 x NA) new abcissa (1 x NEWA) +% ordinate (NO x 1) new ordinate(NEWO x 1) +% +% u = regrid(uv(:,1:2:end),xyt(2,:),zc,lat,zc,1,3); +% +% +% + + + +if nargin<7 + nrng=2; +end + +[xb, yb] = blank(oldz, oldlat, olddat); + +[zg,xg,yg,zb]=zgrid(oldz,oldlat,olddat,nuz,nulat,... + 'cay',1,'del',del,'nrng',nrng,xb,yb); + +nudat = zg; + +%------------------------------ +function [xb, yb] = blank(x, y, Z) +[nr, nc] = size(Z); + +[y, ii] = sort(y); +Z = Z(:,ii); + +% dx and dy are used to push the polygon out a bit, to +% avoid unwanted blanking due to roundoff. +dx = min(abs(diff(x(:))))*0.01; +% assuming x is depth, positive downward. +% y is increasing; it has been sorted. +dy = min(abs(diff(y(:))))*0.01; + +X = x(:,ones(nc,1)); +badmask = isnan(Z); + +X(badmask) = NaN; +xmax = max(X) + dx; +xmin = min(X) - dx; + +ii = isnan(xmax); +xmax(ii) = max(xmin); +xmin(ii) = xmax(ii)-dx; + +xmax(2:(end-1)) = max([ xmax(1:(end-2)); xmax(2:(end-1)); xmax(3:end)]); +xmin(2:(end-1)) = min([ xmin(1:(end-2)); xmin(2:(end-1)); xmin(3:end)]); + +y(1) = y(1) - dy; +y(end) = y(end) + dy; + +xb = [xmax xmin(end:-1:1) xmax(1)]; +yb = [y y(end:-1:1) y(1)]; diff --git a/remmn.m b/remmn.m new file mode 100644 index 0000000..fc14154 --- /dev/null +++ b/remmn.m @@ -0,0 +1,18 @@ +function [A, mn] = remmn(inpt, clms, type); + +if nargin < 2, clms = [1 2 4]; end +if nargin < 3, type = 'full'; end + + +A = zeros(size(inpt)); + +if strcmp(type, 'full') + A(1, :) = inpt(1, :); + A(2:end, 1:3) = inpt(2:end, 1:3); + + nts = size(inpt, 1) - 1; + + mn = nanmean(inpt(2:end, clms(3):end)); + + A(2:end, 4:end) = inpt(2:end, 4:end) - ones(nts, 1)*mn; +end diff --git a/remmnthmn.m b/remmnthmn.m new file mode 100644 index 0000000..ca7d6a3 --- /dev/null +++ b/remmnthmn.m @@ -0,0 +1,18 @@ +function [rsdl, mmn] = remmnthmn(inpt, period, clms, mval) + + +% inpt = findtstps(inpt, period); +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); + +mmn = spatmn(inpt, period, 'monthly', clms, mval); + +for i = 1:12 + mnth_indx = find(mnths == i); + + for j = 1:length(mnth_indx) + rsdl{mnth_indx(j), 1} = mnths(mnth_indx(j)); + rsdl{mnth_indx(j), 2} = yrs(mnth_indx(j)); + rsdl{mnth_indx(j), 3} = inpt{mnth_indx(j), clms(3)} - mmn{i}; + end +end \ No newline at end of file diff --git a/remsc.m b/remsc.m new file mode 100644 index 0000000..55b3993 --- /dev/null +++ b/remsc.m @@ -0,0 +1,26 @@ +function [Res, SC] = remsc(inpt, clms) +% The function computes and removes the seasonal cycle from the input +% time-series +if isnumeric(inpt) + SC = tsmean(inpt, 'monthly', 'clms', clms); + + nyrs = (length(inpt)-1)/12; + +% keyboard + Res = inpt; + Res(2:end, clms(3):end) = inpt(2:end, clms(3):end) - repmat(SC(2:end, 2:end), nyrs, 1); + +elseif iscell(inpt) + if nargin < 2 + clms = [3 4 8]; + end + + SC = spatmn(inpt, [inpt{1,clms(2)} inpt{end,clms(2)}], 'monthly', clms); + nyrs = length(inpt)/12; + Res = inpt; + + for i = 1:length(inpt) + Res{i,clms(3)} = Res{i,clms(3)} - SC{Res{i,clms(1)}}; + end + +end diff --git a/rotateticklabel.m b/rotateticklabel.m new file mode 100644 index 0000000..155a538 --- /dev/null +++ b/rotateticklabel.m @@ -0,0 +1,58 @@ +function th=rotateticklabel(h,rot,demo) +%ROTATETICKLABEL rotates tick labels 
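+%   Example (sketch): TH=ROTATETICKLABEL(GCA,45) rotates the x tick labels of
+%   the current axes by 45 degrees.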
+% TH=ROTATETICKLABEL(H,ROT) is the calling form where H is a handle to +% the axis that contains the XTickLabels that are to be rotated. ROT is +% an optional parameter that specifies the angle of rotation. The default +% angle is 90. TH is a handle to the text objects created. For long +% strings such as those produced by datetick, you may have to adjust the +% position of the axes so the labels don't get cut off. +% +% Of course, GCA can be substituted for H if desired. +% +% TH=ROTATETICKLABEL([],[],'demo') shows a demo figure. +% +% Known deficiencies: if tick labels are raised to a power, the power +% will be lost after rotation. +% +% See also datetick. + +% Written Oct 14, 2005 by Andy Bliss +% Copyright 2005 by Andy Bliss + +%DEMO: +if nargin==3 + x=[now-.7 now-.3 now]; + y=[20 35 15]; + figure + plot(x,y,'.-') + datetick('x',0,'keepticks') + h=gca; + set(h,'position',[0.13 0.35 0.775 0.55]) + rot=90; +end + +%set the default rotation if user doesn't specify +if nargin==1 + rot=90; +end +%make sure the rotation is in the range 0:360 (brute force method) +while rot>360 + rot=rot-360; +end +while rot<0 + rot=rot+360; +end +%get current tick labels +a=get(h,'XTickLabel'); +%erase current tick labels from figure +set(h,'XTickLabel',[]); +%get tick label positions +b=get(h,'XTick'); +c=get(h,'YTick'); +%make new tick labels +if rot<180 + th=text(b,repmat(c(1)-.1*(c(2)-c(1)),length(b),1),a,'HorizontalAlignment','right','rotation',rot); +else + th=text(b,repmat(c(1)-.1*(c(2)-c(1)),length(b),1),a,'HorizontalAlignment','left','rotation',rot); +end + diff --git a/sampleacf.m b/sampleacf.m new file mode 100644 index 0000000..bc1d69e --- /dev/null +++ b/sampleacf.m @@ -0,0 +1,16 @@ +function R = sampleacf(inpt, lam) + +% Computes the autocorrelation function of a matrix +nts = size(inpt, 1); + +mn = mean(inpt, 1); +den = sum((inpt - ones(nts, 1)*mn).^2); + +for i = 0:lam + indx1 = i+1:nts; + indx2 = 1:nts-i; + + R(i+1, :) = sum((inpt(indx1, :) - ones(length(indx1),1) * mn).* ... + (inpt(indx2, :) - ones(length(indx1),1) * mn))./ ... + den; +end diff --git a/satboxplot.m b/satboxplot.m new file mode 100644 index 0000000..17db854 --- /dev/null +++ b/satboxplot.m @@ -0,0 +1,58 @@ +function h=satboxplot(x, y, bincentres, outlierstyle, varargin) + +% satboxplot Modified boxplot +% +% Alternative boxplot. Bins the data (in y) according to bins (in xbin). +% For each bin, plots the median (by default a horizontal red line), +% a box connecting to the 25th and 75th percentile, and whiskers extending +% to the 1st and 99th percentile. All other data are considered outliers +% and plotted individually. To change what the result looks like, add +% additional arguments that will be passed on to boxplot +% +% FORMAT +% +% h = satboxplot(x, y, xbin, outlierstyle, ...) +% +% IN +% +% x numeric array +% Data according to which y is binned. +% y numeric array +% Data for which statistics are carried out, binned +% according to the values of x, etc. +% bincentres Bin centres for x +% outlierstyle Style for outliers, e.g. 'kx' +% ... 
all additional arguments passed on to boxplot +% +% OUT +% +% h numeric scalar, plot handle +% handle to outliers (for other handles, see boxplot) +% +% $Id$ + +binedges = [-inf (bincentres(1:end-1)+bincentres(2:end))/2 inf]; +values_cell = bin(x, y, binedges); +% convert values to a matrix with binned values in the columns, rest nans +maxsize = max(cellfun(@length, values_cell)); +vals_mapped = cellfun(@(v) [v; nan(maxsize-length(v), 1)], values_cell(1:end-1), 'UniformOutput', false); +values = horzcat(vals_mapped{:}); + +q = quantile(values, [0.01 0.25 0.5 0.75 0.99]); + +for c = 1:size(values, 2) + toosmall = values(:, c) < q(1, c); + toolarge = values(:, c) > q(5, c); + outliers = [values(toosmall, c); values(toolarge, c)]; + all_outliers(1:length(outliers), c) = outliers; +end + +all_outliers(all_outliers==0)=nan; +hold on; +boxplot(q, bincentres, 'positions', bincentres, 'labelorientation', 'inline', 'whisker', inf, varargin{:}); +set(gca,'xtickmode','auto','xticklabelmode','auto'); +if ~isempty(all_outliers) + h=plot(bincentres, all_outliers, outlierstyle); +else + h = -1; +end diff --git a/sc2cs.m b/sc2cs.m new file mode 100755 index 0000000..f7eca59 --- /dev/null +++ b/sc2cs.m @@ -0,0 +1,24 @@ +function cs = sc2cs(field) + +% SC2CS(FIELD) converts the rectangular (L+1)x(2L+1) matrix FIELD, containing +% spherical harmonics coefficients in /S|C\ storage format into a +% square (L+1)x(L+1) matrix in |C\S| format. +% +% Nico Sneeuw +% Munich, 22/07/94 + +% uses none + +[rows,cols] = size(field); +lmax = rows -1; +if cols == rows + cs = field; + display('Field was already in CS format') +elseif cols ~= 2*lmax+1, + error('Matrix dimensions must be (L+1)x(2L+1).'), +elseif cols == 2*lmax+1 + c = field(:,lmax+1:2*lmax+1); + s = [zeros(lmax+1,1) field(:,1:lmax)]; + + cs = tril(c) + triu(rot90(s),1); +end diff --git a/scalecov.m b/scalecov.m new file mode 100644 index 0000000..0e4e587 --- /dev/null +++ b/scalecov.m @@ -0,0 +1,46 @@ +function [Qs, scle] = scalecov(Q, dg_std) +% The function scales a e.g. simulated covariance matrix Q with the +% magnitude of some errors sdevs depending on their autocovariances. 
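+% In other words, Qs keeps the correlation structure of Q but takes its
+% standard deviations from dg_std (expected here as a column vector), i.e.
+%     Qs = Q .* ((dg_std*dg_std') ./ (sqrt(diag(Q))*sqrt(diag(Q))'));
+% scle is the element-wise scale factor that is applied to Q.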
+%-------------------------------------------------------------------------- +% Input: Q [n x n] Covariance matrix + +% sdevs [m x m] +% +% Output: A [n x 1] area of the pixels on the surface +% of the Earth [m^2] +%-------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: October 20011 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- + +% Checking the input format of the simulated errors and rearange the +% elements in a column-vector +% [frmt, lmx, dg_std] = chkfrmt(sdevs, 'cvec'); + +% Create a column-vector from the diagonal elements of Q +dg_Q = sqrt(diag(Q)); + +% Auto-covariance of the simulated covariance matrix +AK_Q = dg_Q*dg_Q'; + +% Auto-covariance of the errors +AK_std = dg_std*dg_std'; + +% Computing the scale-factor +scle = AK_std./AK_Q; + +% Re-scaling the simulated covariance matrix +Qs = Q.*scle; + + + + + + + + + + + diff --git a/scatter_line.m b/scatter_line.m new file mode 100644 index 0000000..cd4a658 --- /dev/null +++ b/scatter_line.m @@ -0,0 +1,53 @@ +function [] = scatter_line(ref_ts, varargin) + + + +clr = [ 60 60 60; + 050 136 189; + 244 109 67; + 026 152 080; + 240 130 40; + 0 200 200; + 230 220 50; + 160 0 200; + 160 230 50; + 0 160 255; + 240 0 130; + 230 175 45; + 0 210 140; + 130 0 220]/255; + + + +for i = 1:length(varargin) + mdl_ts(:, i) = varargin{i}; + + A = [mdl_ts(:, i) ones(size(mdl_ts,1),1)]; + y = ref_ts; + + A(isnan(y), :) = []; + y(isnan(y)) = []; + + y(isnan(A), :) = []; + A(isnan(A), :) = []; + + xht(:,i) = inv(A'*A)*A'*y; + +end + + +figure; +hold on + +for i = 1:size(mdl_ts, 2) + scatter(mdl_ts(:,i), ref_ts, 50, clr(i,:), 'filled'); + g = refline(xht(:,i)); + set(g, 'Color', clr(i,:)); + set(g, 'Linewidth', 1.5); + +end +axis equal + + + + diff --git a/showeofana.m b/showeofana.m new file mode 100644 index 0000000..4ff12a2 --- /dev/null +++ b/showeofana.m @@ -0,0 +1,17 @@ +function [] = showeofana(eof_s, pcs, theta, lambda, sel_area, mxmde); +load coast +k = 1; +for i = 1:mxmde + subplot(mxmde, 2, k); + imagesc(lambda, theta, eof_s{i}); + hold on + plot(long, lat, 'k', 'linewidth', 1.5) + axis([sel_area(1), sel_area(2), sel_area(3), sel_area(4)]); + axis xy +% caxis([0 1]); + + subplot(mxmde, 2, k+1); + plot(pcs(:,i)); + + k = k + 2; +end \ No newline at end of file diff --git a/signal_map.m b/signal_map.m new file mode 100644 index 0000000..623e5ff --- /dev/null +++ b/signal_map.m @@ -0,0 +1,27 @@ +function A = signal_map(inpt, indx_vec, id_map) +% The function assigns a value (an element in the inpt-vector) to an area +% of a map refferenced by the id_map. 
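+% Example (sketch; the ids and values are made up): map the mean precipitation
+% of three catchments onto a catchment-id grid:
+%     P_map = signal_map([2.1; 0.7; 1.4], [101; 102; 103], id_map);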
+%-------------------------------------------------------------------------- +% Input: inpt [n x 1] Vector which contains the values to be +% assigned to a map +% indx_vec [n x 1] Vector which contains the ids of the +% areas +% id_map [r x c] Map which contains the ids for each +% area +% +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: August 2012 +%-------------------------------------------------------------------------- +% Uses: +if length(inpt) ~= length(indx_vec) + error('Inpt and indx_vec must have the same length!') +end + +A = zeros(size(id_map)).*NaN; +for i = 1:length(indx_vec) + A(id_map == indx_vec(i)) = inpt(i); +end + +% plotglbl(A) + diff --git a/sortem.m b/sortem.m new file mode 100644 index 0000000..e1c4d88 --- /dev/null +++ b/sortem.m @@ -0,0 +1,31 @@ +function [P2,D2]=sortem(P,D) +% this function takes in two matrices P and D, presumably the output +% from Matlab's eig function, and then sorts the columns of P to +% match the sorted columns of D (going from largest to smallest) +% +% EXAMPLE: +% +% D = +% -90 0 0 +% 0 -30 0 +% 0 0 -60 +% P = +% 1 2 3 +% 1 2 3 +% 1 2 3 +% +% [P,D]=sortem(P,D) +% P = +% 2 3 1 +% 2 3 1 +% 2 3 1 +% D = +% -30 0 0 +% 0 -60 0 +% 0 0 -90 + + +D2=diag(sort(diag(D),'descend')); % make diagonal matrix out of sorted diagonal values of input D +[c, ind]=sort(diag(D),'descend'); % store the indices of which columns the sorted eigenvalues come from +P2=P(:,ind); % arrange the columns in this order + diff --git a/spat_agg.m b/spat_agg.m new file mode 100644 index 0000000..adcd0ff --- /dev/null +++ b/spat_agg.m @@ -0,0 +1,55 @@ +function S = spat_agg(inpt, mask, mval, cell_area, fac) +% The function aggregates an input field over an area which is defined by +% the mask. If there are any missing values in the input field, the +% function does not consider these values. Furthermore, the output can be +% weighted with the cell area. If cell_area is a matrix containing the +% areas of the cells, the output S is the area weighted mean of the input. +% Otherwise, S is simply the mean value of inpt. +% If furthermore fac is defined, the output is divided by this value (e.g. 
+% conversion from mm/month to mm/day) +% ------------------------------------------------------------------------- +% INPUT: inpt [n x m] Matrix/Vector containing the input field +% mask [n x m] Matrix/Vector defining the area of interest +% mval scalar Defines missing values in the input field +% cell_area [n x m] Matrix/Vector containing the area of the +% respective grid-cell +% fac scalar A scalar by which the output is divided +% ------------------------------------------------------------------------- +% OUTPUT S scalar Aggregated value +% ------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: May 2011 +% ------------------------------------------------------------------------- + + +if nargin < 5 + fac = 1; +end + +if nargin < 4 + cell_area = ones(size(inpt)); +end + +if nargin < 3 + mval = -9999; +end + +if nargin < 2 + mask = ones(size(inpt)); +end + +% Setting the grid-cells with missing values to zero +mask(inpt == mval) = 0; + +% Multiplying the mask with the area of the pixels (if provided) +mask = cell_area.*mask; + +% Computing the aggregated area of all valid grid cells +ar = sum(sum(mask)); + +% Multiplicating the input field with the grid-cell area +tmp = mask.*inpt; + +% Computing the output as the weighted mean (by considering the factor fac) +S = sum(sum(tmp))/(ar*fac); + diff --git a/spat_agg_corr.m b/spat_agg_corr.m new file mode 100644 index 0000000..cb25617 --- /dev/null +++ b/spat_agg_corr.m @@ -0,0 +1,84 @@ +function [R] = spat_agg_corr(inpt1, inpt2, mask, mval, cell_area, fac) +% The function aggregates an input field over an area which is defined by +% the mask. If there are any missing values in the input field, the +% function does not consider these values. Furthermore, the output can be +% weighted with the cell area. If cell_area is a matrix containing the +% areas of the cells, the output S is the area weighted mean of the input. +% Otherwise, S is simply the mean value of inpt. +% If furthermore fac is defined, the output is divided by this value (e.g. 
+% conversion from mm/month to mm/day) +% ------------------------------------------------------------------------- +% INPUT: inpt [n x m] Matrix/Vector containing the input field +% mask [n x m] Matrix/Vector defining the area of interest +% mval scalar Defines missing values in the input field +% cell_area [n x m] Matrix/Vector containing the area of the +% respective grid-cell +% fac scalar A scalar by which the output is divided +% ------------------------------------------------------------------------- +% OUTPUT S scalar Aggregated value +% ------------------------------------------------------------------------- +% Author: Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Date: May 2011 +% ------------------------------------------------------------------------- + +if nargin < 6 + fac = 1; +end + +if nargin < 5 + cell_area = ones(size(inpt1)); +end + +if nargin < 4 + mval = -9999; +end + +if nargin < 3 + mask = ones(size(inpt)); +end + +if nargin < 2 + inpt2 = inpt1; +end + +% Setting the grid-cells with missing values to zero +mask(inpt1 == mval) = 0; +mask(inpt2 == mval) = 0; + +% Multiplying the mask with the area of the pixels (if provided) +mask_ar = cell_area.*mask; + +% Computing the aggregated area of all valid grid cells +ar = sum(sum(mask_ar)); +mask_ar = mask_ar/ar; + +% Computing a vector containing the fracions of the total area +mask_v = mask_ar(mask == 1); + +% Multiplicating the input field with the grid-cell area +tmp1 = inpt1(mask == 1); +tmp2 = inpt2(mask == 1); + +% Computing the weighted mean of both input fields +mn1 = mask_v'*tmp1; +mn2 = mask_v'*tmp2; + +% Computing the deviation between the actual gridpoint value and the mean +d1 = tmp1 - mn1; +d2 = tmp2 - mn2; + +% Computing the standard deviations of both fields +sig1 = sqrt(mask_v'.*d1'*d1); +sig2 = sqrt(mask_v'.*d2'*d2); + +% Computing the spatial correlation between both fields +R = (mask_v'.*d1'*d2)/(sig1*sig2); +E = sqrt((mask_v'.*(d2-d1)'*(d2-d1))); + + + +% keyboard + + + + diff --git a/spat_mean.m b/spat_mean.m new file mode 100644 index 0000000..3a37dfb --- /dev/null +++ b/spat_mean.m @@ -0,0 +1,81 @@ +function otpt = spat_mean(inpt, time, tscale, mval) + +% This function computes the mean of a time-series of global fields of an +% arbitrary quantity. Therefore, it can compute a monthly mean (i.e. the +% mean of all Januaries etc.), an annual mean (i.e. the mean of all +% complete years) and a seasonal mean (i.e. the mean of winter, spring, +% summer and autumn). + + +sind = find(cell2mat(inpt(:,4)) == 1 & cell2mat(inpt(:,5)) == time(1)); +eind = find(cell2mat(inpt(:,4)) == 12 & cell2mat(inpt(:,5)) == time(2)); + +% If we want to compute a seasonal mean, we need the December before +% t_start and January + February after t_end +if strcmp(tscale, 'seasonal') + sind = sind - 1; + eind = eind - 1; +end + +fields = inpt(sind:eind,[4 5 9]); + + + + +% We won't need the other components of the input data so we can delete it +clear inpt + +if strcmp(tscale, 'seasonal') + mnths = [12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; + for i = 1:12 + indices(:,i) = find(cell2mat(fields(:,1)) == mnths(i)); + end + + otpt = cell(1,4); + + for i = 1:4 + snl_mn = zeros(360, 720); + mask = ones(360, 720); + for j = 1:size(indices,1) + for k = 1:3 + nr_days = daysinmonth(fields{indices(j,(i-1)*3+k),1}, ... 
+ fields{indices(j,(i-1)*3+k),2}); + snl_mn = snl_mn + fields{indices(j,(i-1)*3+k),3}/nr_days; + mask(fields{indices(j,(i-1)*3+k),3} == mval) = 0; + end + end + otpt{1,i} = snl_mn/(3*size(indices,1)); + otpt{1,i}(mask == 0) = mval; + end + +elseif strcmp(tscale, 'annual') + syear = fields{1,2}; + eyear = fields{end,2}; + + otpt = zeros(360, 720); + mask = ones(360, 720); + + for t = syear:eyear + indices = find(cell2mat(fields(:,2)) == t); + tmp_mn = zeros(360, 720); + + for i = 1:12 + nr_days = daysinmonth(i, t); + tmp_mn = tmp_mn + fields{indices(i), 3}/nr_days; + mask(fields{indices(i),3} == mval) = 0; + end + + otpt = otpt + tmp_mn/12; + end + otpt = otpt/(eyear-syear); + otpt(mask == 0) = mval; + +end + + + + + + + + diff --git a/spataggmn.m b/spataggmn.m new file mode 100644 index 0000000..b64c7fa --- /dev/null +++ b/spataggmn.m @@ -0,0 +1,189 @@ +function otpt = spataggmn(inpt, id_map, area_id, varargin) +% The function computes time-series of area-weighted means over selected +% areas. These areas are defined in the id_map (a matrix where connected +% regions have the same id). The user can choose multiple areas according +% to their area_id. +% If information about the timesteps (year, month) is provided, the +% function saves the months and years in the first two columns of the +% output and the appropriate serial date number (according to matlabs +% datetick-specification) in the third column. The first row contains the +% area_id(s) of the selected regions while the elements of the remaining +% (2:end) rows are the area-weighted means of the areas. +%-------------------------------------------------------------------------- +% Input: inpt {m x n} Cell array which contains the input +% fields. +% id_map [i x j] Map which defines the different areas +% area_id [1 x k] Vector (or scalar) which contains the +% ids of the desired areas +% clms [1 x 3] column indices of the input containing +% month, year and the corresponding field +% [1 x 1] column index of the input fields +% miss [1 x 1] value of undefined elements in the +% input fields +% +% Output: otpt [m x k] matrix containing the area-weighted +% means +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: area_wghts.m, cell2catchmat.m +%-------------------------------------------------------------------------- +% Updates: - 25.01.2013 For-loops removed, switched to matrix-based +% computation +%-------------------------------------------------------------------------- + +% Checking input arguments and setting default values +pp = inputParser; +pp.addRequired('inpt', @(x)iscell(x)); +pp.addRequired('id_map', @(x) (isnumeric(x) | iscell(x))); +pp.addRequired('area_id', @isnumeric); + +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('dlambda', 0.5, @isnumeric); +pp.addParamValue('clms', [3 4 8], @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('method', 'wmean') +pp.addParamValue('areamethod', 'regular') + +pp.parse(inpt, id_map, area_id, varargin{:}) + +clms = pp.Results.clms; +miss = pp.Results.miss; +method = pp.Results.method; +theta = pp.Results.theta; +dlambda = pp.Results.dlambda; +areamethod = pp.Results.areamethod; + +clear pp + +nr_tstps = length(inpt(:, clms(1))); +nr_catch = length(area_id); + + +if length(clms) == 4 + dys = cell2mat(inpt(:, clms(1))); + mnths = 
cell2mat(inpt(:, clms(2))); + yrs = cell2mat(inpt(:, clms(3))); + dte = datenum(yrs, mnths, dys); + [f_rws, f_cls] = size(inpt{1,clms(3)}); + + + flds = inpt(:,clms(4)); + otpt = zeros(nr_tstps + 1, nr_catch + 4); + otpt(1, 5:end) = area_id; + otpt(2:end, 1) = dys; + otpt(2:end, 2) = mnths; + otpt(2:end, 3) = yrs; + otpt(2:end, 4) = dte; + + dte_els = 4; + + + +elseif length(clms) == 3 + + mnths = cell2mat(inpt(:, clms(1))); + yrs = cell2mat(inpt(:, clms(2))); + dte = datenum(yrs, mnths, ones(nr_tstps,1)*15); + [f_rws, f_cls] = size(inpt{1,clms(3)}); + + flds = inpt(:,clms(3)); + otpt = zeros(nr_tstps + 1, nr_catch + 3); + otpt(1, 4:end) = area_id; + otpt(2:end, 1) = mnths; + otpt(2:end, 2) = yrs; + otpt(2:end, 3) = dte; + + dte_els = 3; + +elseif length(clms) == 1 + + [f_rws, f_cls] = size(inpt{1,clms(1)}); + + otpt = zeros(nr_tstps + 1, nr_catch + 1); + otpt(1, 2:end) = area_id; + otpt(2:end, 1) = (1:nr_tstps)'; + + flds = inpt(:,clms(1)); + + dte_els = 1; + +end + +clear inpt + + +% Create a binary mask to remove all "unwanted" elements +bin_mask = zeros(size(flds{1})); + +for i = 1:nr_catch + bin_mask(id_map == area_id(i)) = 1; +end + +% Go through the input dataset and set all missing elements to zero in the +% binary and the id map +for i = 1:nr_tstps + bin_mask(flds{i} == miss) = 0; +end +id_map(bin_mask == 0) = 0; + + +% Re-arrange the maps to vectors which contain only the non-zero elements +% of bin_mask +bin_vec = bin_mask(bin_mask ~= 0); +id_vec = id_map(bin_mask ~= 0); + + + +% For each region, a row in the matrix H is created. At this stage, H is +% binary and defines if an element in the data matrix contains to the +% current catchment or not. +for i = 1:nr_catch + tmp = bin_vec; + tmp(id_vec ~= area_id(i)) = 0; + H(:, i) = tmp; +end + +% For weighted computations, a matrix A_mer is created which contains the +% areas of the pixels. This is used, depending on the chosen method, to +% apply area weights to the elements in the data matrix. As these are all +% linear operations (y=A*H), the weights are added to the H-matrix. 
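+% 'wmean' normalises the area weights so that each column of H sums to one
+% (area-weighted mean), 'mean' uses equal weights over the valid pixels
+% (plain mean), and 'wsum' applies the pixel areas without normalisation
+% (area-weighted sum).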
+if strcmp(method, 'wmean') + A_mer = area_wghts(theta', dlambda, 'mat', areamethod); + A_mer = A_mer(bin_mask ~= 0); + + H = H.*repmat(A_mer, [1 nr_catch]); + H = H./(ones(length(bin_vec), 1)*sum(H)); + +elseif strcmp(method, 'mean') + H = H./(ones(length(bin_vec), 1)*sum(H)) + +elseif strcmp(method, 'wsum') + A_mer = area_wghts(theta', dlambda, 'mat', areamethod); + A_mer = A_mer(bin_mask ~= 0); + + H = H.*repmat(A_mer, [1 nr_catch]); +end + +% Re-arrange the input fields in a big matrix, which contains only the +% pixels which are located in one of the areas of interest +flds_mat = cell2catchmat(flds, bin_mask); + +% Now, the aggregated values can be computed by simple matrix +% multiplication: +otpt(2:end, dte_els+1:end) = flds_mat*H; + + + + + + + + + + + + + + diff --git a/spataggmn_flist.m b/spataggmn_flist.m new file mode 100644 index 0000000..95e2f2b --- /dev/null +++ b/spataggmn_flist.m @@ -0,0 +1,41 @@ +function [] = spataggmn_flist(fnmes, id_map, area_id, outnme) +% This function acts as an "input-parser" for the spataggmn.m function, +% which allows the processing of several input FILES, given in the +% inpt-parameter + +if ischar(fnmes) + otpt.filelist = fnmes; + tmp = importdata(fnmes); + clear fnames + fnmes = tmp; +end + +h = waitbar(0,'','Name','Number of datasets processed...'); +for i = 1:length(fnmes) + + + varnme = who('-file', fnmes{i}); + + otpt.filename{i,1} = fnmes{i}; + otpt.varname{i,1} = varnme; + + load(fnmes{i}); + + data = eval(varnme{1}); + otpt.data{i,1} = spataggmn(data, id_map, area_id); + clear('data') + + save(outnme, 'otpt'); + + if exist('h') + waitbar(i/length(fnmes), h, [int2str(i), ' of ', length(fnmes)]) + end + + +end + + + + + + \ No newline at end of file diff --git a/spataggmn_new.m b/spataggmn_new.m new file mode 100644 index 0000000..aca9f78 --- /dev/null +++ b/spataggmn_new.m @@ -0,0 +1,195 @@ +function otpt = spataggmn_new(inpt, id_map, area_id, varargin) +% The function computes time-series of area-weighted means over selected +% areas. These areas are defined in the id_map (a matrix where connected +% regions have the same id). The user can choose multiple areas according +% to their area_id. +% If information about the timesteps (year, month) is provided, the +% function saves the months and years in the first two columns of the +% output and the appropriate serial date number (according to matlabs +% datetick-specification) in the third column. The first row contains the +% area_id(s) of the selected regions while the elements of the remaining +% (2:end) rows are the area-weighted means of the areas. +%-------------------------------------------------------------------------- +% Input: inpt {m x n} Cell array which contains the input +% fields. 
+% id_map [i x j] Map which defines the different areas +% area_id [1 x k] Vector (or scalar) which contains the +% ids of the desired areas +% clms [1 x 3] column indices of the input containing +% month, year and the corresponding field +% [1 x 1] column index of the input fields +% miss [1 x 1] value of undefined elements in the +% input fields +% +% Output: otpt [m x k] matrix containing the area-weighted +% means +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: area_wghts.m, cell2catchmat.m +%-------------------------------------------------------------------------- +% Updates: - 25.01.2013 For-loops removed, switched to matrix-based +% computation +%-------------------------------------------------------------------------- + +% Checking input arguments and setting default values +pp = inputParser; +pp.addRequired('inpt', @(x)iscell(x)); +pp.addRequired('id_map', @(x) (isnumeric(x) | iscell(x))); +pp.addRequired('area_id', @isnumeric); + +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('dlambda', 0.5, @isnumeric); +pp.addParamValue('clms', [3 4 8], @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('method', 'wmean') +pp.addParamValue('areamethod', 'regular') + +pp.parse(inpt, id_map, area_id, varargin{:}) + +clms = pp.Results.clms; +miss = pp.Results.miss; +method = pp.Results.method; +theta = pp.Results.theta; +dlambda = pp.Results.dlambda; +areamethod = pp.Results.areamethod; + +clear pp + + +if size(area_id,1) > 1 + area_id = area_id'; +end + + +nr_tstps = length(inpt(:, clms(1))); +nr_catch = length(area_id); + + +if length(clms) == 4 + dys = cell2mat(inpt(:, clms(1))); + mnths = cell2mat(inpt(:, clms(2))); + yrs = cell2mat(inpt(:, clms(3))); + dte = datenum(yrs, mnths, dys); + [f_rws, f_cls] = size(inpt{1,clms(3)}); + + + flds = inpt(:,clms(4)); + otpt = zeros(nr_tstps + 1, nr_catch + 4); + otpt(1, 5:end) = area_id; + otpt(2:end, 1) = dys; + otpt(2:end, 2) = mnths; + otpt(2:end, 3) = yrs; + otpt(2:end, 4) = dte; + + dte_els = 4; + + + +elseif length(clms) == 3 + + mnths = cell2mat(inpt(:, clms(1))); + yrs = cell2mat(inpt(:, clms(2))); + dte = datenum(yrs, mnths, ones(nr_tstps,1)*15); + [f_rws, f_cls] = size(inpt{1,clms(3)}); + + flds = inpt(:,clms(3)); + otpt = zeros(nr_tstps + 1, nr_catch + 3); + otpt(1, 4:end) = area_id; + otpt(2:end, 1) = mnths; + otpt(2:end, 2) = yrs; + otpt(2:end, 3) = dte; + + dte_els = 3; + +elseif length(clms) == 1 + + [f_rws, f_cls] = size(inpt{1,clms(1)}); + + otpt = zeros(nr_tstps + 1, nr_catch + 1); + otpt(1, 2:end) = area_id; + otpt(2:end, 1) = (1:nr_tstps)'; + + flds = inpt(:,clms(1)); + + dte_els = 1; + +end + +clear inpt + + +% Create a binary mask to remove all "unwanted" elements +bin_mask = zeros(size(flds{1})); + +for i = 1:nr_catch + bin_mask(id_map == area_id(i)) = 1; +end + +% Go through the input dataset and set all missing elements to zero in the +% binary and the id map +for i = 1:nr_tstps + bin_mask(flds{i} == miss) = 0; +end +id_map(bin_mask == 0) = 0; + + +% Re-arrange the maps to vectors which contain only the non-zero elements +% of bin_mask +bin_vec = bin_mask(bin_mask ~= 0); +id_vec = id_map(bin_mask ~= 0); + + + +% For each region, a row in the matrix H is created. At this stage, H is +% binary and defines if an element in the data matrix contains to the +% current catchment or not. 
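+%
+% Side note: because bin_vec only contains ones at this point, the loop
+% below is equivalent to the vectorised alternative (not used here)
+%   H = double(bsxfun(@eq, id_vec, area_id));
+% which would also preallocate H in a single step.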
+for i = 1:nr_catch + tmp = bin_vec; + tmp(id_vec ~= area_id(i)) = 0; + H(:, i) = tmp; +end + +% For weighted computations, a matrix A_mer is created which contains the +% areas of the pixels. This is used, depending on the chosen method, to +% apply area weights to the elements in the data matrix. As these are all +% linear operations (y=A*H), the weights are added to the H-matrix. +if strcmp(method, 'wmean') + A_mer = area_wghts(theta', dlambda, 'mat', areamethod); + A_mer = A_mer(bin_mask ~= 0); + + H = H.*repmat(A_mer, [1 nr_catch]); + H = H./(ones(length(bin_vec), 1)*sum(H)); + +elseif strcmp(method, 'mean') + H = H./(ones(length(bin_vec), 1)*sum(H)) + +elseif strcmp(method, 'wsum') + A_mer = area_wghts(theta', dlambda, 'mat', areamethod); + A_mer = A_mer(bin_mask ~= 0); + + H = H.*repmat(A_mer, [1 nr_catch]); +end + +% Re-arrange the input fields in a big matrix, which contains only the +% pixels which are located in one of the areas of interest +flds_mat = cell2catchmat(flds, bin_mask); + +% Now, the aggregated values can be computed by simple matrix +% multiplication: +otpt(2:end, dte_els+1:end) = flds_mat*H; + + + + + + + + + + + + + + diff --git a/spataggmn_old.m b/spataggmn_old.m new file mode 100644 index 0000000..c9f62b7 --- /dev/null +++ b/spataggmn_old.m @@ -0,0 +1,178 @@ +function otpt = spataggmn(inpt, id_map, area_id, varargin) +% The function computes time-series of area-weighted means over selected +% areas. These areas are defined in the id_map (a matrix where connected +% regions have the same id). The user can choose multiple areas according +% to their area_id. +% If information about the timesteps (year, month) is provided, the +% function saves the months and years in the first two columns of the +% output and the appropriate serial date number (according to matlabs +% datetick-specification) in the third column. The first row contains the +% area_id(s) of the selected regions while the elements of the remaining +% (2:end) rows are the area-weighted means of the areas. +%-------------------------------------------------------------------------- +% Input: inpt {m x n} Cell array which contains the input +% fields. 
+% id_map [i x j] Map which defines the different areas +% area_id [1 x k] Vector (or scalar) which contains the +% ids of the desired areas +% clms [1 x 3] column indices of the input containing +% month, year and the corresponding field +% [1 x 1] column index of the input fields +% miss [1 x 1] value of undefined elements in the +% input fields +% +% Output: otpt [m x k] matrix containing the area-weighted +% means +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: July 2011 +%-------------------------------------------------------------------------- +% Uses: area_wghts.m +%-------------------------------------------------------------------------- + +% Checking input arguments and setting default values +pp = inputParser; +pp.addRequired('inpt', @(x)iscell(x)); +pp.addRequired('id_map', @(x) (isnumeric(x) | iscell(x))); +pp.addRequired('area_id', @isnumeric); + +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('dlambda', 0.5, @isnumeric); +pp.addParamValue('clms', [3 4 8], @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('method', 'wmean') +pp.addParamValue('areamethod', 'regular') + +pp.parse(inpt, id_map, area_id, varargin{:}) + +clms = pp.Results.clms; +miss = pp.Results.miss; +method = pp.Results.method; +theta = pp.Results.theta; +dlambda = pp.Results.dlambda; +areamethod = pp.Results.areamethod; + +clear pp + + +if size(area_id,1) > 1 + area_id = area_id'; +end + + +nr_tstps = length(inpt(:, clms(1))); +nr_catch = length(area_id); + + +if length(clms) == 4 + dys = cell2mat(inpt(:, clms(1))); + mnths = cell2mat(inpt(:, clms(2))); + yrs = cell2mat(inpt(:, clms(3))); + dte = datenum(yrs, mnths, dys); + [f_rws, f_cls] = size(inpt{1,clms(3)}); + + + flds = inpt(:,clms(4)); + otpt = zeros(nr_tstps + 1, nr_catch + 4); + otpt(1, 5:end) = area_id; + otpt(2:end, 1) = dys; + otpt(2:end, 2) = mnths; + otpt(2:end, 3) = yrs; + otpt(2:end, 4) = dte; + + dte_els = 4; + + + +elseif length(clms) == 3 + + mnths = cell2mat(inpt(:, clms(1))); + yrs = cell2mat(inpt(:, clms(2))); + dte = datenum(yrs, mnths, ones(nr_tstps,1)*15); + [f_rws, f_cls] = size(inpt{1,clms(3)}); + + flds = inpt(:,clms(3)); + otpt = zeros(nr_tstps + 1, nr_catch + 3); + otpt(1, 4:end) = area_id; + otpt(2:end, 1) = mnths; + otpt(2:end, 2) = yrs; + otpt(2:end, 3) = dte; + + dte_els = 3; + +elseif length(clms) == 1 + + [f_rws, f_cls] = size(inpt{1,clms(1)}); + + otpt = zeros(nr_tstps + 1, nr_catch + 1); + otpt(1, 2:end) = area_id; + otpt(2:end, 1) = (1:nr_tstps)'; + + flds = inpt(:,clms(1)); + + dte_els = 1; + +end + +clear inpt +[nlats, nlons] = size(flds{1}); + +A_mer = area_wghts(theta', dlambda, 'mat', areamethod); + +if nr_catch > 10 + h = waitbar(0,'','Name','...% of catchments computed'); +end + +for i = 1:nr_catch + + for j = 1:nr_tstps + mask = zeros(nlats, nlons); + mask(id_map == area_id(i)) = 1; + if isnan(miss) + mask(isnan(flds{j})) = 0; + flds{j}(isnan(flds{j})) = 0; + else + mask(flds{j} == miss) = 0; + end + + if strcmp(method, 'wmean') + tmp = mask.*A_mer; + A_ctch = tmp/sum(sum(tmp)); + otpt(j+1, i+dte_els) = sum(sum(flds{j}.*A_ctch)); + + elseif strcmp(method, 'mean') + tmp = mask.*flds{j}; + otpt(j+1, i+dte_els) = sum(sum(tmp))/sum(sum(mask)); + + elseif strcmp(method, 'sum') + tmp = mask.*flds{j}; + otpt(j+1, i+dte_els) = sum(sum(tmp)); + + elseif strcmp(method, 'wsum') + tmp = mask.*A_mer.*flds{j}; + otpt(j+1, i+dte_els) = sum(sum(tmp)); + end + end + + + if 
exist('h') + waitbar(i/nr_catch, h, [int2str((i*100)/nr_catch) '%']) + end + +end + +if exist('h') + close(h) +end + + + + + + + + + + + + diff --git a/spatcoor.m b/spatcoor.m new file mode 100644 index 0000000..936b6ed --- /dev/null +++ b/spatcoor.m @@ -0,0 +1,46 @@ +function R = spatcorr(fld1, fld2, varargin{:}) + + + + +% Checking input arguments and setting default values +pp = inputParser; +pp.addRequired('fld1', @isnumeric); +pp.addRequired('fld2', @isnumeric); + +pp.addParamValue('mask', ones(size(fld1)), @isnumeric); +pp.addParamValue('method', 'awghts') +pp.addParamValue('mval1', -9999, @isnumeric); +pp.addParamValue('mval2', -9999, @isnumeric); +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('dlambda', 0.5, @isnumeric); + +pp.parse(fld1, fld2, varargin{:}) + +mask = pp.Results.clms; +method = pp.Results.method; +mval1 = pp.Results.mval1; +mval2 = pp.Results.mval2; +theta = pp.Results.theta; +dlamba = pp.Results.dlambda; + + +% Removing the missing values for both fields +mask(fld1 == mval1) = 0; +mask(fld2 == mval2) = 0; + +fld1 = fld1.*mask; +fld2 = fld2.*mask; + + +if strcmp(method, 'awghts') + A = area_wghts(theta, dlambda, 'mat', 'haversine'); + + mn1 = sum(sum(fld1.*A.*mask))./sum(sum(A.*mask)); + mn2 = sum(sum(fld2.*A.*mask))./sum(sum(A.*mask)); + + + + + + diff --git a/spatmn.m b/spatmn.m new file mode 100644 index 0000000..40b1190 --- /dev/null +++ b/spatmn.m @@ -0,0 +1,199 @@ +function otpt = spatmn(inpt, time, tscale, clms, mval, method) + +% Of a given input dataset, this function computes the long-term mean +% either as the mean of the whole timeseries, a seasonal mean or a monthly +% mean. +% ------------------------------------------------------------------------- +% Input: inpt 'cell' The input dataset must be a cell variable +% which contains the global field itself and a +% time stamp +% time [1 x 2] Defines the start- and end-year of the +% time-series which is considered +% tscale 'string' 'complete' -> complete time-series +% 'annual' -> mean of each year +% 'seasonal' -> long-term mean of the four +% seasons DJF, MAM, JJA, SON +% 'monthly' -> long-term mean for each month +% clms [1 x 3] tells the function which row of the input +% dataset contains month, year and the global +% field, i.e. 
clms(1) -> month, clms(2) -> year +% and clms(3) -> field +% +% Output: otpt {1 x 1} long-term annual mean global field +% {1 x 4} long-term mean seasonal global fields +% {1 x 12} long-term mean monthly global fields + +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% Jaunary 2011 +% ------------------------------------------------------------------------- +% Uses: findtsps.m +% ------------------------------------------------------------------------- + + +if nargin < 6, method = 'wmean'; end +if nargin < 5, mval = -9999; end +if nargin < 4, clms = [3 4 8]; end + + +fields = findtstps_cell(inpt, [time(1) time(2)], clms(2)); + +mnths = cell2mat(fields(:, clms(1))); +yrs = cell2mat(fields(:, clms(2))); +fields = fields(:, clms(3)); +fsze = size(fields{1}); + + +if strcmp(mval, 'NaN') + for i = 1:length(fields) + fields{i}(isnan(fields{i})) = -9999; + end + mval = -9999; +end + +if strcmp(method, 'wmean') + dom = eomday(yrs, mnths); + dots = sum(dom); +end + +if strcmp(tscale, 'complete') + + otpt = zeros(fsze); + vclls = zeros(fsze); +% keyboard + if strcmp(method, 'wmean') + for i = 1:length(fields) + otpt = otpt + dom(i)*fields{i}; + vclls(fields{i} ~= mval) = vclls(fields{i} ~= mval) + dom(i); + end + otpt(vclls ~= 0) = otpt(vclls ~= 0)./vclls(vclls ~= 0); + + elseif strcmp(method, 'mean') + for i = 1:length(fields) + otpt = otpt + fields{i}; + vclls(fields{i} ~= mval) = vclls(fields{i} ~= mval) + 1; + end + otpt(vclls ~= 0) = otpt(vclls ~= 0)./vclls(vclls ~= 0); + + elseif strcmp(method, 'sum') + for i = 1:length(fields) + otpt = otpt + fields{i}; + vclls(fields{i} ~= mval) = vclls(fields{i} ~= mval) + 1; + end + otpt(vclls < i) = mval; + end + otpt(vclls == 0) = mval; + + + +elseif strcmp(tscale, 'annual') + yrs_u = unique(yrs); + + for i = 1:length(yrs_u) + + yr_indx = find(yrs == yrs_u(i)); + otpt{i,1} = yrs_u(i); + otpt{i,2} = zeros(fsze); + vclls = zeros(fsze); + tmp = zeros(fsze); + + if strcmp(method, 'wsum') + for j = 1:length(yr_indx) + tmp = tmp + dom(yr_indx(j))*fields{yr_indx(j)} + vclls(fields{yr_indx(j)} ~= mval) = ... + vclls(fields{yr_indx(j)} ~= mval) + dom(yr_indx(j)); + end + else + for j = 1:length(yr_indx) + tmp = tmp + fields{yr_indx(j)}; + vclls(fields{yr_indx(j)} ~= mval) = ... + vclls(fields{yr_indx(j)} ~= mval) + 1; + end + end + + otpt{i,2}(vclls ~= 0) = tmp(vclls ~= 0)./vclls(vclls ~= 0); + + end + + +elseif strcmp(tscale, 'seasonal') + % Find the fields' indices of the four seasons + ssn_indx{1} = find(mnths == 12 | mnths == 1 | mnths == 2); + ssn_indx{2} = find(mnths == 3 | mnths == 4 | mnths == 5); + ssn_indx{3} = find(mnths == 6 | mnths == 7 | mnths == 8); + ssn_indx{4} = find(mnths == 9 | mnths == 10 | mnths == 11); + + + for i = 1:4 + vclls = zeros(fsze); + otpt{1,i} = zeros(fsze); + tmp = zeros(fsze); + + if strcmp(method, 'wmean') + for j = 1:length(ssn_indx{i}) + tmp = tmp + dom(ssn_indx{i}(j))*fields{ssn_indx{i}(j)}; + vclls = eval_clls(vclls, fields{ssn_indx{i}(j)}, ... 
+ mval, dom(ssn_indx{i}(j))); + end + otpt{1,i}(vclls ~= 0) = tmp(vclls ~= 0)./vclls(vclls ~= 0); + + elseif strcmp(method, 'mean') + for j = 1:length(ssn_indx{i}) + tmp = tmp + fields{ssn_indx{i}(j)}; + vclls = eval_clls(vclls, fields{ssn_indx{i}(j)}, mval, 1); + end + otpt{1,i}(vclls ~= 0) = tmp(vclls ~= 0)./vclls(vclls ~= 0); + + elseif strcmp(method, 'sum') + for j = 1:length(ssn_indx{i}) + tmp = tmp + fields{ssn_indx{i}(j)}; + vclls = eval_clls(vclls, fields{ssn_indx{i}(j)}, mval, 1); + end + tmp(vclls == 0) = mval; + end + + end + + + + elseif strcmp(tscale, 'monthly') + + for i = 1:12 + indx = find(mnths == i); + + otpt{i,1} = zeros(fsze); + tmp = zeros(fsze); + vclls = zeros(fsze); + + if strcmp(method, 'wmean') + for j = 1:length(indx) + tmp = tmp + dom(indx(j))*fields{indx(j)}; + vclls = eval_clls(vclls, fields{indx(j)}, mval, dom(indx(j))); + end + else + for j = 1:length(indx) + tmp = tmp + fields{indx(j)}; + vclls = eval_clls(vclls, fields{indx(j)}, mval, 1); + end + end + + tmp(vclls ~= 0) = tmp(vclls ~= 0)./vclls(vclls ~= 0); + otpt{i,1} = tmp; + + if strcmp(mval, 'NaN') + otpt{i,1}(vcllss == 0) = NaN; + else + otpt{i,1}(vclls == 0) = mval; + end + end + +end + + + +function vclls = eval_clls(vclls, field, mval, add_nr) + % The sub-routine adds a number add_nr to each location of a field + % without a missing value. + vclls(field ~= mval) = vclls(field ~= mval) + add_nr; + + diff --git a/spher2cart.m b/spher2cart.m new file mode 100644 index 0000000..2bff1ad --- /dev/null +++ b/spher2cart.m @@ -0,0 +1,17 @@ +function X = spher2cart(L, B, H); + + +% Parameter des WGS84 +a = 6378137; % große Halbachse +b = 6356752.314; % kleine Halbachse + +% Querkrümmungshalbmesser +N = a^2/sqrt(a^2*(cos(B*pi/180))^2 + b^2*(sin(B*pi/180))^2); + +% Koordinaten im kart. Koordinatensystem +X = [(N+H)*cos(B*pi/180)*cos(L*pi/180); + (N+H)*cos(B*pi/180)*sin(L*pi/180); + N*sin(B*pi/180)*b^2/a^2 + H*sin(B*pi/180)]; + + + diff --git a/spherdist.m b/spherdist.m new file mode 100644 index 0000000..2322837 --- /dev/null +++ b/spherdist.m @@ -0,0 +1,34 @@ +function [d d_r] = spherdist(la_A, th_A, la_B, th_B) + +% spherdist.m computes the spherical distance between two points A and B, +% which are given in spherical longitude lambda and co-latitude theta. +%-------------------------------------------------------------------------- +% Input: la_A, th_A [1 x 1] longitude and co-latitude of A [deg] +% la_B, th_B [1 x 1] longitude and co-latitude of B [deg] +% +% Output: d [1 x 1] spherical distance between A and B [m] +% d_r [1 x 1] angular distance between A and B [rad] +%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: 8. Sep. 
08
+%--------------------------------------------------------------------------
+% Uses:
+%--------------------------------------------------------------------------
+
+if nargin < 4
+    error('spherdist.m : four input arguments (la_A, th_A, la_B, th_B) are required.')
+end
+
+la_A = la_A*pi/180;
+th_A = th_A*pi/180;
+la_B = la_B*pi/180;
+th_B = th_B*pi/180;
+
+% Radius of the Earth
+R = 6378137;
+
+% Angular distance between A and B [rad]
+d_r = acos(sin(th_A).*sin(th_B).*cos(la_A - la_B) + cos(th_A).*cos(th_B));
+
+% Spherical distance between A and B [m]
+d = R*d_r;
diff --git a/ssa.m b/ssa.m new file mode 100644 index 0000000..966c807 --- /dev/null +++ b/ssa.m @@ -0,0 +1,96 @@
+
+function [y,r,vr]=ssa(x1,L)
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% -----------------------------------------------------------------
+% Author: Francisco Javier Alonso Sanchez e-mail:fjas@unex.es
+% Department of Electronics and Electromechanical Engineering
+% Industrial Engineering School
+% University of Extremadura
+% Badajoz
+% Spain
+% -----------------------------------------------------------------
+%
+% SSA generates a trajectory matrix X from the original series x1
+% by sliding a window of length L. The trajectory matrix is approximated
+% using Singular Value Decomposition. The last step reconstructs
+% the series from the approximated trajectory matrix. The SSA applications
+% include smoothing, filtering, and trend extraction.
+% The algorithm used is described in detail in: Golyandina, N., Nekrutkin,
+% V., Zhigljavsky, A., 2001. Analysis of Time Series Structure - SSA and
+% Related Techniques. Chapman & Hall/CRC.
+
+% x1   Original time series (column vector form)
+% L    Window length
+% y    Reconstructed time series
+% r    Residual time series r=x1-y
+% vr   Relative value of the norm of the approximated trajectory matrix with
+%      respect to the original trajectory matrix
+
+% The program output is the Singular Spectrum of x1 (must be a column vector),
+% using a window length L. You must choose the components to be used to reconstruct
+% the series in the form [i1,i2:ik,...,iL], based on the Singular Spectrum appearance.
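+%
+% Example (illustrative only, with arbitrary test data):
+%   x = sin((1:200)'/10) + 0.3*randn(200,1);   % noisy column-vector series
+%   [y, r, vr] = ssa(x, 40);                   % window length L = 40
+% When prompted, an input such as I = [1 2] reconstructs the series from
+% the two leading components of the singular spectrum.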
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + + + +% Step1 : Build trayectory matrix + + N=length(x1); + if L>N/2;L=N-L;end + K=N-L+1; + X=zeros(L,K); + for i=1:K + X(1:L,i)=x1(i:L+i-1); + end + +% Step 2: SVD + + S=X*X'; + [U,autoval]=eig(S); + [d,i]=sort(-diag(autoval)); + d=-d; + U=U(:,i);sev=sum(d); + plot((d./sev)*100),hold on,plot((d./sev)*100,'rx'); + title('Singular Spectrum');xlabel('Eigenvalue Number');ylabel('Eigenvalue (% Norm of trajectory matrix retained)') + V=(X')*U; + rc=U*V'; + +% Step 3: Grouping + + I=input('Choose the agrupation of components to reconstruct the series in the form I=[i1,i2:ik,...,iL] ') + Vt=V'; + rca=U(:,I)*Vt(I,:); + +% Step 4: Reconstruction + + y=zeros(N,1); + Lp=min(L,K); + Kp=max(L,K); + + for k=0:Lp-2 + for m=1:k+1; + y(k+1)=y(k+1)+(1/(k+1))*rca(m,k-m+2); + end + end + + for k=Lp-1:Kp-1 + for m=1:Lp; + y(k+1)=y(k+1)+(1/(Lp))*rca(m,k-m+2); + end + end + + for k=Kp:N + for m=k-Kp+2:N-Kp+1; + y(k+1)=y(k+1)+(1/(N-k))*rca(m,k-m+2); + end + end + + figure;subplot(2,1,1);hold on;xlabel('Data poit');ylabel('Original and reconstructed series') + plot(x1);grid on;plot(y,'r') + + r=x1-y; + subplot(2,1,2);plot(r,'g');xlabel('Data poit');ylabel('Residual series');grid on + vr=(sum(d(I))/sev)*100; + diff --git a/sub_mac.m b/sub_mac.m new file mode 100644 index 0000000..6fea2f9 --- /dev/null +++ b/sub_mac.m @@ -0,0 +1,14 @@ +function otpt = sub_mac(inpt); + +mac = tsmean(inpt, 'monthly'); + + +otpt = inpt; + +for i = 1:12 + mind = find(inpt(:, 1) == i); + + for j = 1:length(mind) + otpt(mind(j), 4:end) = inpt(mind(j), 4:end) - mac(i+1, 2:end); + end +end \ No newline at end of file diff --git a/svd_ana.m b/svd_ana.m new file mode 100644 index 0000000..3802c64 --- /dev/null +++ b/svd_ana.m @@ -0,0 +1,177 @@ +function [eofs, ecof, lams, recon] = svd_ana(inpt1, inpt2, varargin); + + + +pp = inputParser; +pp.addRequired('inpt1', @(x) (iscell(x) | isnumeric(x))); +pp.addRequired('inpt2', @(x) (iscell(x) | isnumeric(x))); + +pp.addParamValue('weightflg', true, @islogical); +pp.addParamValue('theta', (89.75:-0.5:-89.75)', @isnumeric); +pp.addParamValue('dlambda', 0.5, @isnumeric); +pp.addParamValue('clms', [4 5 9], @isnumeric); +pp.addParamValue('miss', -9999, @(x) (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('mask', 0, @isnumeric); +pp.addParamValue('mxmde', 0, @isint); +pp.addParamValue('dorecon', true, @islogical); +pp.addParamValue('addmn', true, @islogical); +pp.addParamValue('containsmiss', false, @islogical); + +pp.parse(inpt1, inpt2, varargin{:}); + +weightflg = pp.Results.theta; +theta = pp.Results.theta; +dlambda = pp.Results.dlambda; +clms = pp.Results.clms; +miss = pp.Results.miss; +mask = pp.Results.mask; +mxmde = pp.Results.mxmde; +dorecon = pp.Results.dorecon; +addmn = pp.Results.addmn; +containsmiss = pp.Results.containsmiss; + +[rws, cls] = size(inpt1{1, clms(3)}); + +% Compute weight factors (cos(theta)). 
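+% Each grid row is scaled by cos(latitude), which is (to first order)
+% proportional to the area of a grid cell on the sphere; for example, a
+% cell at 60 deg latitude (cos = 0.5) receives half the weight of an
+% equatorial cell, so high-latitude pixels do not dominate the SVD.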
+if weightflg == true + if size(theta, 2) > size(theta, 1) + theta = theta'; + end + weights = cos(theta*pi/180)*ones(1, cls); +else + weights = ones(rws, cls); +end + + + +% Domain selection with a binary mask +if size(mask) == [1 1] + if containsmiss == true + + mask = ones(360, 720); + mask(inpt1{1, clms(3)} == miss) = 0; + mask(inpt2{1, clms(3)} == miss) = 0; + mask_vec = mask(:); + c_indx = find(mask_vec == 1); + + for i = 1:size(inpt, 1) + tmp1 = inpt1{i, clms(3)}.*weights; + tmp2 = inpt2{i, clms(3)}.*weights; + F1(i,:) = tmp1(mask == 1)'; + F2(i,:) = tmp2(mask == 1)'; + end + + else + for i = 1:size(inpt, 1) + tmp1 = inpt1{i, clms(3)}.*weights; + F1(i,:) = tmp1(:)'; + F2(i,:) = tmp2(:)'; + end + c_indx = ones(rws*cls,1); + end +elseif size(mask) == [rws, cls] + + if containsmiss == true + mask(inpt1{1, clms(3)} == miss) = 0; + mask(inpt2{1, clms(3)} == miss) = 0; + end + mask_vec = mask(:); + c_indx = find(mask_vec == 1); + + for i = 1:size(inpt, 1) + tmp1 = inpt1{i, clms(3)}.*weights; + tmp2 = inpt2{i, clms(3)}.*weights; + F1(i,:) = tmp1(mask == 1)'; + F2(i,:) = tmp2(mask == 1)'; + end +end + +[n, p] = size(F1); + + +% Removing the mean from the data +mn_F1 = mean(F1,1); +mn_F2 = mean(F2,1); + +F1_prime = F1 - ones(n, 1)*mn_F1; +F2_prime = F2 - ones(n, 1)*mn_F2; + + +% Compute the covariance matrix between inpt1 and inpt2 +C = F1_prime'*F2_prime; + +% Eigenvectors and eigenvalues of the covariance matrix R = F'*F +if mxmde == 0 | mxmde == p + [U, P, eofs] = svd(C); +else + [U, P, eofs] = svds(C, mxmde); +end + +% Calculate the expansion coefficients +ecof1 = F1_prime*U; +ecof2 = F2_prime*V; + +ecof1 = [cell2mat(inpt1(:, clms(1))) cell2mat1(inpt(:, clms(2))) ecof1]; +ecof2 = [cell2mat(inpt2(:, clms(1))) cell2mat2(inpt(:, clms(2))) ecof2]; + +% Compute the squared covariance fraction +scf(:,1) = diag(P); +scf(:,2) = scf(:,1)*100./sum(scf(:,1)); + +% Compute the map for the eofs and reconstruct the data +if dorecon == true + + for i = 1:size(eofs,2) + tmp = zeros(rws*cls, 1)*NaN; + tmp(c_indx,1) = eofs(:,i); + + recon.eofs{i,1} = reshape(tmp, rws, cls); + end + + + F_recon = pcs(:, 3:end)*eofs'; + + mn_fld = zeros(rws*cls, 1)*NaN; + if addmn == true + mn_fld(c_indx,1) = mn_F(:); + end + + for i = 1:size(F_recon, 1) + tmp = zeros(rws*cls, 1)*NaN; + tmp(c_indx, 1) = F_recon(i,:)'; + + recon.F{i,1} = inpt{i, clms(1)}; + recon.F{i,2} = inpt{i, clms(2)}; + recon.F{i,3} = reshape(tmp + mn_fld, rws, cls); + end +else + recon = 0; +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/t_test_corr.m b/t_test_corr.m new file mode 100644 index 0000000..59bbe8b --- /dev/null +++ b/t_test_corr.m @@ -0,0 +1,15 @@ +function PG = t_test_corr(R, n); +% +% if nargin < 2 +% R = matrix_corr(inpt, 'pearson'); +% end + +PG = zeros(size(R)); + +for i = 1:size(R,1); + PG(i, i:end) = R(i,i:end).*sqrt((n-2)./(1-R(i,i:end).^2)); +end + + +Q = tinv(0.95, 119); +keyboard \ No newline at end of file diff --git a/tavgflds.m b/tavgflds.m new file mode 100644 index 0000000..3bf720b --- /dev/null +++ b/tavgflds.m @@ -0,0 +1,12 @@ +function otpt = tavgflds(inpt, mval) + +for i = 1:length(inpt) + if i == 1 + otpt{i,1} = 3/4*inpt{1} + 1/4*inpt{2}; + elseif i == length(inpt) + otpt{i,1} = 3/4*inpt{i} + 1/4*inpt{i-1}; + else + otpt{i,1} = 1/4*inpt{i-1} + 1/2*inpt{i} + 1/4*inpt{i+1}; + end +% otpt{i}(inpt{i} == mval) = mval; +end diff --git a/taylor_stats.m b/taylor_stats.m new file mode 100644 index 0000000..4afb0f0 --- /dev/null +++ b/taylor_stats.m @@ -0,0 +1,38 @@ +function [R E sig] = taylor_stats(r, 
varargin); + +if size(r, 2) > 1, r = r'; end +% for i = 1:length(varargin) +ndta = numel(varargin); + +% keyboard +% [rr cr] = size(r); + +% Compute a big matrix containing all the time-series +ts_mat(:,1) = r; +for i = 1:ndta + tmp = varargin{i}; + if size(tmp, 2) > 1, tmp = tmp'; end + + ts_mat(:,i+1) = varargin{i}; +end + +% Look for NaNs in the Time-series and remove the appropriate rows +[nan_r, nan_c] = find(isnan(ts_mat)); +ts_mat(nan_r, :) = []; + +nts = size(ts_mat, 1); + +mn = mean(ts_mat, 1); +sig = std(ts_mat, 1); +ts_mat_c = ts_mat - ones(nts, 1)*mn; +C = (ts_mat_c'*ts_mat)./(nts*sig'*sig); +R = C(1,:); +E = sqrt(1/nts*sum((ts_mat_c - ts_mat_c(:,1)*ones(1, ndta+1)).^2)); + + + + + + + + diff --git a/taylor_stats_2d.m b/taylor_stats_2d.m new file mode 100644 index 0000000..005fcc1 --- /dev/null +++ b/taylor_stats_2d.m @@ -0,0 +1,113 @@ +function [R E sig] = taylor_stats_glob(r, cswitch, mval, varargin); + +% Similar function like taylor_stats.m which allows two-dimensional fields +% as input quantities. + + +[rr cr] = size(r); + +for k = 1:length(varargin) + [rf cf] = size(varargin{k}); + if rr ~= rf | cr ~= cf + error('Input fields must have the same dimension!') + end +end + +cell_area = area_wghts(0.25:0.5:179.75, 0.5); +cell_area = cell_area'*ones(1,720); + +mask = gen_mask(cswitch); + +mask(r == mval) = 0; + +for i = 1:length(varargin) + % Set the value of any location which contains mval in a + % dataset to zero + mask(varargin{i} == mval) = 0; +end + +mask_ar = cell_area.*mask; +ar = sum(sum(mask_ar)); +mask_ar = mask_ar/ar; +mask_v = mask_ar(mask == 1); + +flds_vec(:,1) = r(mask == 1); + +for i = 1:length(varargin) + flds_vec(:,i+1) = varargin{i}(mask == 1); +end + +for i = 1:length(varargin)+1 + mn(1,i) = mask_v'*flds_vec(:,i); + + if i == 1 + d1 = flds_vec(:,1) - mn(1,1); + end + + d2 = flds_vec(:,i) - mn(1,i); + + sig(1,i) = sqrt(mask_v'.*d2'*d2); + R(1,i) = (mask_v'.*d1'*d2)/(sig(1,i)*sig(1,1)); + E(1,i) = sqrt((mask_v'.*(d2-d1)'*(d2-d1))); + +end + + clear flds_vec mask ar_wts ar_tot tmp n + + +elseif cswitch == 5 + + for j = 1:11 + + mask = mask_t; + mask(continents ~= j) = 0; + mask(r == mval) = 0; + + for i = 1:length(varargin) + % Set the value of any location which contains mval in a + % dataset to zero + mask(varargin{i} == mval) = 0; + end + + ar_wts = A.*mask; + ar_tot = sum(sum(ar_wts)); + ar_wts = ar_wts/ar_tot; + + ar_wts2 = ar_wts(mask==1); + + flds_vec(:,1) = r(mask == 1); + + for i = 1:length(varargin) + tmp = varargin{i}; + flds_vec(:,i+1) = tmp(mask == 1); + end + + for i = 1:length(varargin)+1 + mn(j,i) = ar_wts2'*flds_vec(:,i); + + if i == 1 + d1 = flds_vec(:,1) - mn(j,1); + end + + d2 = flds_vec(:,i) - mn(j,i); + + sig(j,i) = sqrt(ar_wts2'.*d2'*d2); + R(j,i) = (ar_wts2'.*d1'*d2)/(sig(j,i)*sig(j,1)); + E(j,i) = sqrt((ar_wts2'.*(d2-d1)'*(d2-d1))); + + end + + clear flds_vec mask ar_wts ar_tot tmp n + end +end + + + + + + + + + + + diff --git a/taylor_stats_cont.m b/taylor_stats_cont.m new file mode 100644 index 0000000..59401eb --- /dev/null +++ b/taylor_stats_cont.m @@ -0,0 +1,52 @@ +function [R E sig] = taylor_stats_cont(r, obs, contindx, mval); + +% Similar function like taylor_stats.m which allows two-dimensional fields +% as input quantities. 
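+%
+% The returned quantities follow Taylor (2001): for a field f and a
+% reference r with (area-weighted) standard deviations sig_f, sig_r and
+% correlation R, the centred RMS difference E satisfies
+%   E^2 = sig_f^2 + sig_r^2 - 2*sig_f*sig_r*R,
+% which is also the consistency relation that taylordiag.m checks for
+% its inputs.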
+ + +[rr cr] = size(r); +[ro co] = size(obs); + +if rr ~= ro | cr ~= co + error('Input fields must have the same dimension!') +end + +if strcmp(mval, 'NaN') + r(isnan(r)) = -9999; + obs(isnan(obs)) = -9999; + mval = -9999; +end + +cell_area = area_wghts(0.25:0.5:179.75, 0.5); +cell_area = cell_area'*ones(1,720); + +load continents.asc + + for i = 1:contindx + mask = zeros(360, 720); + mask(continents == contindx(i)) = 1; + mask(r == mval) = 0; + mask(obs == mval) = 0; + + mask_ar = cell_area.*mask; + ar = sum(sum(mask_ar)); + mask_ar = mask_ar/ar; + mask_v = mask_ar(mask == 1); + + r_vec = r(mask == 1); + + for j = 1:length(obs) + f_vec = obs{i}(mask == 1); + mn = mask_v'*f_vec; + if i == 1 + d1 = r - mn; + end + d2 = f_vec - mn; + sig(j,i) = sqrt(mask_v'.*d2'*d2); + R(j,i) = (mask_v'.*d1'*d2)/(sig(1,i)*sig(1,1)); + E(j,i) = sqrt((mask_v'.*(d2-d1)'*(d2-d1))); + end +end + + + diff --git a/taylordiag.m b/taylordiag.m new file mode 100644 index 0000000..29ecc7e --- /dev/null +++ b/taylordiag.m @@ -0,0 +1,530 @@ +% TAYLORDIAG Plot a Taylor Diagram +% +% [hp ht axl] = taylordiag(STDs,RMSs,CORs,['option',value]) +% +% Plot a Taylor diagram from statistics of different series. +% +% INPUTS: +% STDs: Standard deviations +% RMSs: Centered Root Mean Square Difference +% CORs: Correlation +% +% Each of these inputs are one dimensional with same length. First +% indice corresponds to the reference serie for the diagram. For exemple +% STDs(1) is the standard deviation of the reference serie and STDs(2:N) +% are the standard deviations of the other series. +% +% Note that by definition the following relation must be true for all series i: +% RMSs(i) - sqrt(STDs(i).^2 + STDs(1)^2 - 2*STDs(i)*STDs(1).*CORs(i)) = 0 +% This relation is checked and if not verified an error message is sent. Please see +% Taylor's JGR article for more informations about this. +% You can use the ALLSTATS function to avoid this to happen, I guess ;-). You can get +% it somewhere from: http://codes.guillaumemaze.org/matlab +% +% OUTPUTS: +% hp: returns handles of plotted points +% ht: returns handles of the text legend of points +% axl: returns a structure of handles of axis labels +% +% LIST OF OPTIONS: +% For an exhaustive list of options to customize your diagram, please call the function +% without arguments: +% >> taylordiag +% +% SHORT TUTORIAL (see taylordiag_test.m for more informations): +% An easy way to get compute inputs is to use the ALLSTATS function you can get from: +% http://codes.guillaumemaze.org/matlab +% Let's say you gathered all the series you want to put in the Taylor diagram in a +% single matrix BUOY(N,nt) with N the number of series and nt their (similar) length. +% If BUOY(1,:) is the serie of reference for the diagram: +% for iserie = 2 : size(BUOY,1) +% S = allstats(BUOY(1,:),BUOY(iserie,:)); +% MYSTATS(iserie,:) = S(:,2); % We get stats versus reference +% end%for iserie +% MYSTATS(1,:) = S(:,1); % We assign reference stats to the first row +% Note that the ALLSTATS function can handle NaNs, so be careful to compute statistics +% with enough points ! +% Then you're ready to simply run: +% taylordiag(MYSTATS(:,2),MYSTATS(:,3),MYSTATS(:,4)); +% +% REF: K. Taylor +% Summarizing multiple aspects of model performance in a single diagram +% Journal of Geophysical Research-Atmospheres, 2001, V106, D7. +% +% Rev. by Guillaume Maze on 2010-02-10: Help more helpful ! Options now displayed by call. +% Copyright (c) 2008 Guillaume Maze. +% http://codes.guillaumemaze.org +% All rights reserved. 
+ +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% * Redistributions of source code must retain the above copyright notice, this list of +% conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright notice, this list +% of conditions and the following disclaimer in the documentation and/or other materials +% provided with the distribution. +% * Neither the name of the Laboratoire de Physique des Oceans nor the names of its contributors may be used +% to endorse or promote products derived from this software without specific prior +% written permission. +% +% THIS SOFTWARE IS PROVIDED BY Guillaume Maze ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +% INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +% PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Guillaume Maze BE LIABLE FOR ANY +% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +% LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +% BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +% STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% + + + + +function varargout = taylordiag(varargin) + +%% +if nargin == 0 + disp_optionslist; + return +else + narg = nargin - 3; + if mod(narg,2) ~=0 + error('taylordiag.m : Wrong number of arguments') + end +end + +STDs = varargin{1}; +RMSs = varargin{2}; +CORs = varargin{3}; + +%% CHECK THE INPUT FIELDS: +apro = 100; +di = fix(RMSs*apro)/apro - fix(sqrt(STDs.^2 + STDs(1)^2 - 2*STDs*STDs(1).*CORs)*apro)/apro; +if find(di~=0) +% help taylordiag.m + ii = find(di~=0); + if length(ii) == length(di) + error(sprintf('taylordiag.m : Something''s wrong with ALL the datas\nYou must have:\nRMSs - sqrt(STDs.^2 + STDs(1)^2 - 2*STDs*STDs(1).*CORs) = 0 !')) + else + error(sprintf('taylordiag.m : Something''s wrong with data indice(s): [%i]\nYou must have:\nRMSs - sqrt(STDs.^2 + STDs(1)^2 - 2*STDs*STDs(1).*CORs) = 0 !',ii)) + end +end + +%% IN POLAR COORDINATES: +rho = STDs; +theta = real(acos(CORs)); +dx = rho(1); % Observed STD + +%% + + + +%% BEGIN THE PLOT HERE TO GET AXIS VALUES: +hold off +cax = gca; +tc = get(cax,'xcolor'); +%ls = get(cax,'gridlinestyle'); +ls = '-'; % DEFINE HERE THE GRID STYLE +next = lower(get(cax,'NextPlot')); + +%% LOAD CUSTOM OPTION OF AXE LIMIT: +nopt = narg/2; foundrmax = 0; +for iopt = 4 : 2 : narg+3 + optvalue = varargin{iopt+1}; + switch lower(varargin{iopt}), case 'limstd', rmax = optvalue; foundrmax=1; end +end + +% make a radial grid +hold(cax,'on'); +if foundrmax==0 + maxrho = max(abs(rho(:))); +else + maxrho = rmax; +end +hhh = line([-maxrho -maxrho maxrho maxrho],[-maxrho maxrho maxrho -maxrho],'parent',cax); +set(cax,'dataaspectratio',[1 1 1],'plotboxaspectratiomode','auto') +v = [get(cax,'xlim') get(cax,'ylim')]; +ticks = sum(get(cax,'ytick')>=0); +delete(hhh); + + +% check radial limits and ticks +rmin = 0; +if foundrmax == 0; + rmax = v(4); +end +rticks = max(ticks-1,2); +if rticks > 5 % see if we can reduce the number + if rem(rticks,2) == 0 + rticks = rticks/2; + elseif rem(rticks,3) == 0 + rticks = rticks/3; + end +end +rinc = (rmax-rmin)/rticks; +tick = (rmin+rinc):rinc:rmax; + +%% LOAD DEFAULT PARAMETERS: +if find(CORs<0) 
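+    % any negative correlation switches to the two-panel layout, which also
+    % covers the left (negative-correlation) half of the diagram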
+ Npan = 2; % double panel +else + Npan = 1; +end +tickRMSangle = 135; +showlabelsRMS = 1; +showlabelsSTD = 1; +showlabelsCOR = 1; +colSTD = [0 0 0]; +colRMS = [0 .6 0]; +colCOR = [0 0 1]; +tickCOR(1).val = [1 .99 .95 .9:-.1:0]; +tickCOR(2).val = [1 .99 .95 .9:-.1:0 -.1:-.1:-.9 -.95 -.99 -1]; +widthCOR = .8; +widthRMS = .8; +widthSTD = .8; +styleCOR = '-.'; +styleRMS = '--'; +styleSTD = ':'; +titleRMS = 1; +titleCOR = 1; +titleSTD = 1; +tickRMS = tick; rincRMS = rinc; +tickSTD = tick; rincSTD = rinc; + + +%% LOAD CUSTOM OPTIONS: +nopt = narg/2; +for iopt = 4 : 2 : narg+3 + optname = varargin{iopt}; + optvalue = varargin{iopt+1}; + switch lower(optname) + + case 'tickrms' + tickRMS = sort(optvalue); + rincRMS = (max(tickRMS)-min(tickRMS))/length(tickRMS); + case 'showlabelsrms' + showlabelsRMS = optvalue; + case 'tickrmsangle' + tickRMSangle = optvalue; + case 'colrms' + colRMS = optvalue; + case 'widthrms' + widthRMS = optvalue; + case 'stylerms' + styleRMS = optvalue; + case 'titlerms' + titleRMS = optvalue; + + case 'tickstd' + tickSTD = sort(optvalue); + rincSTD = (max(tickSTD)-min(tickSTD))/length(tickSTD); + case 'showlabelsstd' + showlabelsSTD = optvalue; + case 'colstd' + colstd = optvalue; + case 'widthstd' + widthSTD = optvalue; + case 'stylestd' + styleSTD = optvalue; + case 'titlestd' + titleSTD = optvalue; + case 'npan' + Npan = optvalue; + + case 'tickcor' + tickCOR(Npan).val = optvalue; + case 'colcor' + colCOR = optvalue; + case 'widthcor' + widthCOR = optvalue; + case 'stylecor' + styleCOR = optvalue; + case 'titlecor' + titleCOR = optvalue; + case 'showlabelscor' + showlabelsCOR = optvalue; + end +end + + +%% CONTINUE THE PLOT WITH UPDATED OPTIONS: + +% define a circle + th = 0:pi/150:2*pi; + xunit = cos(th); + yunit = sin(th); +% now really force points on x/y axes to lie on them exactly + inds = 1:(length(th)-1)/4:length(th); + xunit(inds(2:2:4)) = zeros(2,1); + yunit(inds(1:2:5)) = zeros(3,1); +% plot background if necessary + if ~ischar(get(cax,'color')), +% ig = find(th>=0 & th<=pi); + ig = 1:length(th); + patch('xdata',xunit(ig)*rmax,'ydata',yunit(ig)*rmax, ... + 'edgecolor',tc,'facecolor',get(cax,'color'),... + 'handlevisibility','off','parent',cax); + end + +% DRAW RMS CIRCLES: + % ANGLE OF THE TICK LABELS + c82 = cos(tickRMSangle*pi/180); + s82 = sin(tickRMSangle*pi/180); + for ic = 1 : length(tickRMS) + i = tickRMS(ic); + iphic = find( sqrt(dx^2+rmax^2-2*dx*rmax*xunit) >= i ,1); + ig = find(i*cos(th)+dx <= rmax*cos(th(iphic))); + hhh = line(xunit(ig)*i+dx,yunit(ig)*i,'linestyle',styleRMS,'color',[.6 .6 .6],'linewidth',widthRMS,... + 'handlevisibility','off','parent',cax); + if showlabelsRMS + text((i+rincRMS/20)*c82+dx,(i+rincRMS/20)*s82, ... + [' ' num2str(i)],'verticalalignment','bottom',... + 'handlevisibility','off','parent',cax,'color',colRMS,'rotation',tickRMSangle-90, 'fontsize', 18) + end + end + +% DRAW DIFFERENTLY THE CIRCLE CORRESPONDING TO THE OBSERVED VALUE +% hhh = line((cos(th)*dx),sin(th)*dx,'linestyle','--','color',colSTD,'linewidth',1,... +% 'handlevisibility','off','parent',cax); + + +% DRAW STD CIRCLES: + % draw radial circles + for ic = 1 : length(tickSTD) + i = tickSTD(ic); + hhh = line(xunit*i,yunit*i,'linestyle',styleSTD,'color',[.6 .6 .6],'linewidth',widthSTD,... + 'handlevisibility','off','parent',cax); + if showlabelsSTD + if Npan == 2 + if length(find(tickSTD==0)) == 0 + text(0,-rinc/20,'0','verticalalignment','top','horizontalAlignment','center',... 
+ 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18); + end + text(i,-rinc/20, ... + num2str(i),'verticalalignment','top','horizontalAlignment','center',... + 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18) + else + if length(find(tickSTD==0)) == 0 + text(-rinc/20,rinc/20,'0','verticalalignment','middle','horizontalAlignment','right',... + 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18); + end + text(-rinc/20,i, ... + num2str(i),'verticalalignment','middle','horizontalAlignment','right',... + 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18) + end + end + end + + set(hhh,'linestyle','-', 'color', 'k') % Make outer circle solid + +% DRAW CORRELATIONS LINES EMANATING FROM THE ORIGIN: + corr = tickCOR(Npan).val; + th = acos(corr); + cst = cos(th); snt = sin(th); + cs = [-cst; cst]; + sn = [-snt; snt]; + line(rmax*cs,rmax*sn,'linestyle',styleCOR,'color',[.6 .6 .6],'linewidth',widthCOR,... + 'handlevisibility','off','parent',cax) + + % annotate them in correlation coef + if showlabelsCOR + rt = 1.05*rmax; + for i = 1:length(corr) + text(rt*cst(i),rt*snt(i),num2str(corr(i)),... + 'horizontalalignment','center',... + 'handlevisibility','off','parent',cax,'color',colCOR, 'fontsize', 18); + if i == length(corr) + loc = int2str(0); + loc = '1'; + else + loc = int2str(180+i*30); + loc = '-1'; + end + end + end + +% AXIS TITLES + axlabweight = 'bold'; + ix = 0; + if Npan == 1 + if titleSTD + ix = ix + 1; +% ax(ix).handle = ylabel('Standard deviation','color',colSTD,'fontweight',axlabweight, 'fontsize', 18); + ax(ix).handle = ylabel(' ','color',colSTD,'fontweight',axlabweight, 'fontsize', 18); + end + + if titleCOR + ix = ix + 1; + clear ttt + pos1 = 45; DA = 15; + lab = 'Correlation'; + c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); + dd = 1.1*rmax; ii = 0; + for ic = 1 : length(c) + ith = c(ic); + ii = ii + 1; + ttt(ii)=text(dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); + set(ttt(ii),'rotation',ith-90,'color',colCOR,'horizontalalignment','center',... + 'verticalalignment','bottom','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight, 'fontsize', 18); + end + ax(ix).handle = ttt; + end + +% if titleRMS +% ix = ix + 1; +% clear ttt +% pos1 = tickRMSangle+(180-tickRMSangle)/2; DA = 15; pos1 = 160; +% lab = 'RMSD'; +% c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); +% dd = 1.05*tickRMS(1); +% dd = .95*tickRMS(2); +% ii = 0; +% for ic = 1 : length(c) +% ith = c(ic); +% ii = ii + 1; +% ttt(ii)=text(dx+dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); +% set(ttt(ii),'rotation',ith-90,'color',colRMS,'horizontalalignment','center',... +% 'verticalalignment','top','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight, 'fontsize', 14); +% end +% ax(ix).handle = ttt; +% end + + + else + if titleSTD + ix = ix + 1; + ax(ix).handle =ylabel('Standard deviation','fontweight',axlabweight,'color',colSTD); + end + + if titleCOR + ix = ix + 1; + clear ttt + pos1 = 90; DA = 15; + lab = 'Correlation'; + c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); + dd = 1.1*rmax; ii = 0; + for ic = 1 : length(c) + ith = c(ic); + ii = ii + 1; + ttt(ii)=text(dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); + set(ttt(ii),'rotation',ith-90,'color',colCOR,'horizontalalignment','center',... 
+ 'verticalalignment','bottom','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight); + end + ax(ix).handle = ttt; + end + +% if titleRMS +% ix = ix + 1; +% clear ttt +% pos1 = 160; DA = 10; +% lab = 'RMSD'; +% c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); +% dd = 1.05*tickRMS(1); ii = 0; +% for ic = 1 : length(c) +% ith = c(ic); +% ii = ii + 1; +% ttt(ii)=text(dx+dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); +% set(ttt(ii),'rotation',ith-90,'color',colRMS,'horizontalalignment','center',... +% 'verticalalignment','bottom','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight); +% end +% ax(ix).handle = ttt; +% end + end + + +% VARIOUS ADJUSTMENTS TO THE PLOT: + set(cax,'dataaspectratio',[1 1 1]), axis(cax,'off'); set(cax,'NextPlot',next); + set(get(cax,'xlabel'),'visible','on') + set(get(cax,'ylabel'),'visible','on') + % makemcode('RegisterHandle',cax,'IgnoreHandle',q,'FunctionName','polar'); + % set view to 2-D + view(cax,2); + % set axis limits + if Npan == 2 + axis(cax,rmax*[-1.15 1.15 0 1.15]); + line([-rmax rmax],[0 0],'color',tc,'linewidth',1.2); + line([0 0],[0 rmax],'color',tc); + else + axis(cax,rmax*[0 1.15 0 1.15]); +% axis(cax,rmax*[-1 1 -1.15 1.15]); + line([0 rmax],[0 0],'color',tc,'linewidth',1.2); + line([0 0],[0 rmax],'color',tc,'linewidth',2); + end + + +% FINALY PLOT THE POINTS: + hold on + ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'; + for ii = 1 : length(STDs) +% pp(ii)=polar(theta(ii),rho(ii)); + pp(ii)=plot(rho(ii)*cos(theta(ii)),rho(ii)*sin(theta(ii))); + set(pp(ii),'marker','.','markersize',20); + set(pp(ii),'color','k'); + if length(STDs)<=26 + tt(ii)=text(rho(ii)*cos(theta(ii)),rho(ii)*sin(theta(ii)),ALPHABET(ii),'color','k'); + elseif length(STDs)<=26*2 + tt(ii)=text(rho(ii)*cos(theta(ii)),rho(ii)*sin(theta(ii)),lower(ALPHABET(ii)),'color','k'); + else + error('sorry I don''t how to handle more than 52 points labels !'); + end + end + set(tt,'verticalalignment','bottom','horizontalalignment','right') + set(tt,'fontsize',20) + + +%%% OUTPUT +switch nargout + case 1 + varargout(1) = {pp}; + case 2 + varargout(1) = {pp}; + varargout(2) = {tt}; + case 3 + varargout(1) = {pp}; + varargout(2) = {tt}; + varargout(3) = {ax}; +end + + +end%function + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +function varargout = disp_optionslist(varargin) + +disp('General options:') +dispopt('''Npan''',sprintf('1 or 2: Panels to display (1 for positive correlations, 2 for positive and negative correlations).\n\t\tDefault value depends on CORs')); + +disp('RMS axis options:') +dispopt('''tickRMS''','RMS values to plot gridding circles from observation point'); +dispopt('''colRMS''','RMS grid and tick labels color. Default: green'); +dispopt('''showlabelsRMS''','0 / 1 (default): Show or not the RMS tick labels'); +dispopt('''tickRMSangle''','Angle for RMS tick lables with the observation point. Default: 135 deg.'); +dispopt('''styleRMS''','Linestyle of the RMS grid'); +dispopt('''widthRMS''','Line width of the RMS grid'); +dispopt('''titleRMS''','0 / 1 (default): Show RMSD axis title'); + +disp('STD axis options:') +dispopt('''tickSTD''','STD values to plot gridding circles from origin'); +dispopt('''colSTD''','STD grid and tick labels color. 
Default: black'); +dispopt('''showlabelsSTD''','0 / 1 (default): Show or not the STD tick labels'); +dispopt('''styleSTD''','Linestyle of the STD grid'); +dispopt('''widthSTD''','Line width of the STD grid'); +dispopt('''titleSTD''','0 / 1 (default): Show STD axis title'); +dispopt('''limSTD''','Max of the STD axis (radius of the largest circle)'); + +disp('CORRELATION axis options:') +dispopt('''tickCOR''','CORRELATON grid values'); +dispopt('''colCOR''','CORRELATION grid color. Default: blue'); +dispopt('''showlabelsCOR''','0 / 1 (default): Show or not the CORRELATION tick labels'); +dispopt('''styleCOR''','Linestyle of the COR grid'); +dispopt('''widthCOR''','Line width of the COR grid'); +dispopt('''titleCOR''','0 / 1 (default): Show CORRELATION axis title'); + +end%function + +function [] = dispopt(optname,optval) + disp(sprintf('\t%s',optname)); + disp(sprintf('\t\t%s',optval)); +end \ No newline at end of file diff --git a/taylordiag_new.m b/taylordiag_new.m new file mode 100644 index 0000000..8525112 --- /dev/null +++ b/taylordiag_new.m @@ -0,0 +1,612 @@ +% TAYLORDIAG Plot a Taylor Diagram +% +% [hp ht axl] = taylordiag(STDs,RMSs,CORs,['option',value]) +% +% Plot a Taylor diagram from statistics of different series. +% +% INPUTS: +% STDs: Standard deviations +% RMSs: Centered Root Mean Square Difference +% CORs: Correlation +% +% Each of these inputs are one dimensional with same length. First +% indice corresponds to the reference serie for the diagram. For exemple +% STDs(1) is the standard deviation of the reference serie and STDs(2:N) +% are the standard deviations of the other series. +% +% Note that by definition the following relation must be true for all series i: +% RMSs(i) - sqrt(STDs(i).^2 + STDs(1)^2 - 2*STDs(i)*STDs(1).*CORs(i)) = 0 +% This relation is checked and if not verified an error message is sent. Please see +% Taylor's JGR article for more informations about this. +% You can use the ALLSTATS function to avoid this to happen, I guess ;-). You can get +% it somewhere from: http://codes.guillaumemaze.org/matlab +% +% OUTPUTS: +% hp: returns handles of plotted points +% ht: returns handles of the text legend of points +% axl: returns a structure of handles of axis labels +% +% LIST OF OPTIONS: +% For an exhaustive list of options to customize your diagram, please call the function +% without arguments: +% >> taylordiag +% +% SHORT TUTORIAL (see taylordiag_test.m for more informations): +% An easy way to get compute inputs is to use the ALLSTATS function you can get from: +% http://codes.guillaumemaze.org/matlab +% Let's say you gathered all the series you want to put in the Taylor diagram in a +% single matrix BUOY(N,nt) with N the number of series and nt their (similar) length. +% If BUOY(1,:) is the serie of reference for the diagram: +% for iserie = 2 : size(BUOY,1) +% S = allstats(BUOY(1,:),BUOY(iserie,:)); +% MYSTATS(iserie,:) = S(:,2); % We get stats versus reference +% end%for iserie +% MYSTATS(1,:) = S(:,1); % We assign reference stats to the first row +% Note that the ALLSTATS function can handle NaNs, so be careful to compute statistics +% with enough points ! +% Then you're ready to simply run: +% taylordiag(MYSTATS(:,2),MYSTATS(:,3),MYSTATS(:,4)); +% +% REF: K. Taylor +% Summarizing multiple aspects of model performance in a single diagram +% Journal of Geophysical Research-Atmospheres, 2001, V106, D7. +% +% Rev. by Guillaume Maze on 2010-02-10: Help more helpful ! Options now displayed by call. +% Copyright (c) 2008 Guillaume Maze. 
+% http://codes.guillaumemaze.org +% All rights reserved. +% Update by Christof Lorenz on 2011-01-27: Added some functions to allow +% 2D-arrays as input data; further added some features to change marker +% color and type of the datapoints. Type 'taylordiag' for infos about the +% new features + + + +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% * Redistributions of source code must retain the above copyright notice, this list of +% conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright notice, this list +% of conditions and the following disclaimer in the documentation and/or other materials +% provided with the distribution. +% * Neither the name of the Laboratoire de Physique des Oceans nor the names of its contributors may be used +% to endorse or promote products derived from this software without specific prior +% written permission. +% +% THIS SOFTWARE IS PROVIDED BY Guillaume Maze ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +% INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +% PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Guillaume Maze BE LIABLE FOR ANY +% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +% LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +% BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +% STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% + + + + +function varargout = taylordiag_new(varargin) + +%% +if nargin == 0 + disp_optionslist; + return +else + narg = nargin - 3; + if mod(narg,2) ~=0 + error('taylordiag.m : Wrong number of arguments') + end +end + +STDs = varargin{1}; +RMSs = varargin{2}; +CORs = varargin{3}; + +%% CHECK THE INPUT FIELDS: +apro = 1; +% +% for jj = 1:size(STDs, 1) +% % for ii = 1:size(STDs,2) +% di(jj,:) = fix(RMSs(jj,:)*apro)/apro - fix(sqrt(STDs(jj,:).^2 + ... 
+% STDs(jj,1)^2 - 2*STDs*STDs(jj,1).*CORs(jj,:))*apro)/apro; +% % keyboard +% end + +% Check of input data now allows 2D-arrays +% for ii = 1:size(STDs, 1) +% tmp(ii,:) = sqrt(abs(STDs(ii,:).^2 + STDs(ii,1)^2 - 2*STDs(ii,:).*STDs(ii,1).*CORs(ii,:))); +% di(ii, :) = RMSs(ii,:) - tmp(ii,:); +% keyboard +% end + +% if find(abs(di) > 1e-5) +% % help taylordiag.m +% [r,c] = find(di~=0); +% if length(r) == size(di,1) & length(c) == size(di,2) +% error(sprintf('taylordiag.m : Something''s wrong with ALL the datas\nYou must have:\nRMSs - sqrt(STDs.^2 + STDs(1)^2 - 2*STDs*STDs(1).*CORs) = 0 !')) +% else +% error(sprintf('taylordiag.m : Something''s wrong with data indice(s): [%i %i]\nYou must have:\nRMSs - sqrt(STDs.^2 + STDs(1)^2 - 2*STDs*STDs(1).*CORs) = 0 !',r,c)) +% end +% end + +%% IN POLAR COORDINATES: +rho = STDs; +theta = real(acos(CORs)); +dx = rho(1); % Observed STD + +%% + + + +%% BEGIN THE PLOT HERE TO GET AXIS VALUES: +hold off +cax = gca; +tc = get(cax,'xcolor'); +%ls = get(cax,'gridlinestyle'); +ls = '-'; % DEFINE HERE THE GRID STYLE +next = lower(get(cax,'NextPlot')); + +%% LOAD CUSTOM OPTION OF AXE LIMIT: +nopt = narg/2; foundrmax = 0; +for iopt = 4 : 2 : narg+3 + optvalue = varargin{iopt+1}; + switch lower(varargin{iopt}), case 'limstd', rmax = optvalue; foundrmax=1; end +end + +% make a radial grid +hold(cax,'on'); +if foundrmax==0 + maxrho = max(abs(rho(:))); +else + maxrho = rmax; +end +hhh = line([-maxrho -maxrho maxrho maxrho],[-maxrho maxrho maxrho -maxrho],'parent',cax); +set(cax,'dataaspectratio',[1 1 1],'plotboxaspectratiomode','auto') +v = [get(cax,'xlim') get(cax,'ylim')]; +ticks = sum(get(cax,'ytick')>=0); +delete(hhh); + + +% check radial limits and ticks +rmin = 0; +if foundrmax == 0; + rmax = v(4); +end +rticks = max(ticks-1,2); +if rticks > 5 % see if we can reduce the number + if rem(rticks,2) == 0 + rticks = rticks/2; + elseif rem(rticks,3) == 0 + rticks = rticks/3; + end +end +rinc = (rmax-rmin)/rticks; +tick = (rmin+rinc):rinc:rmax; + +%% LOAD DEFAULT PARAMETERS: +if find(CORs<0) + Npan = 2; % double panel +else + Npan = 1; +end +tickRMSangle = 135; +showlabelsRMS = 1; +showlabelsSTD = 1; +showlabelsCOR = 1; +colSTD = [0 0 0]; +colRMS = [0 0 0]; +colCOR = [0 0 0]; +% tickCOR(1).val = [1 .99 .95 .9:-.1:0]; +tickCOR(1).val = [1 0.99 0.95 0.9 0.8 0.6 0.4 0.2 0]; +tickCOR(2).val = [1 .99 .95 .9:-.1:0 -.1:-.1:-.9 -.95 -.99 -1]; +widthCOR = .8; +widthRMS = .8; +widthSTD = .8; +styleCOR = '-.'; +styleRMS = '--'; +styleSTD = ':'; +titleRMS = 1; +titleCOR = 1; +titleSTD = 1; +tickRMS = tick; rincRMS = rinc; +tickSTD = tick; rincSTD = rinc; +% New options, added 27.01.2011 +labelDTA = 1; +for i = 1:size(STDs, 2) +% pointclr{i} = 'r'; + pointclr{i} = [1 0 0]; + markertype{i} = '.'; +end +pointsize = 30; +normdta = 1; + +pointclr{1} = [000 000 000]/255; +pointclr{2} = [220 220 000]/255; +pointclr{3} = [152 078 163]/255; +pointclr{6} = [228 026 028]/255; +pointclr{5} = [055 126 184]/255; +pointclr{7} = [077 175 074]/255; +pointclr{4} = [255 127 000]/255; + + + + +%% LOAD CUSTOM OPTIONS: +nopt = narg/2; +for iopt = 4 : 2 : narg+3 + optname = varargin{iopt}; + optvalue = varargin{iopt+1}; + switch lower(optname) + + case 'tickrms' +% tickRMS = sort(optvalue); + tickRMS = 0:0.25:1.25; + rincRMS = (max(tickRMS)-min(tickRMS))/length(tickRMS); + + case 'showlabelsrms' + showlabelsRMS = optvalue; + case 'tickrmsangle' + tickRMSangle = optvalue; + case 'colrms' + colRMS = optvalue; + case 'widthrms' + widthRMS = optvalue; + case 'stylerms' + styleRMS = optvalue; + case 'titlerms' + 
titleRMS = optvalue; + + case 'tickstd' + tickSTD = sort(optvalue); + rincSTD = (max(tickSTD)-min(tickSTD))/length(tickSTD); + case 'showlabelsstd' + showlabelsSTD = optvalue; + case 'colstd' + colstd = optvalue; + case 'widthstd' + widthSTD = optvalue; + case 'stylestd' + styleSTD = optvalue; + case 'titlestd' + titleSTD = optvalue; + case 'npan' + Npan = optvalue; + + case 'tickcor' + tickCOR(Npan).val = optvalue; + case 'colcor' + colCOR = optvalue; + case 'widthcor' + widthCOR = optvalue; + case 'stylecor' + styleCOR = optvalue; + case 'titlecor' + titleCOR = optvalue; + case 'showlabelscor' + showlabelsCOR = optvalue; + % Added 27.01.2011 + case 'labeldta' + labelDTA = optvalue; + case 'pointclr' + pointclr = optvalue; + case 'pointsize' + pointsize = optvalue; + case 'markertype' + markertype = optvalue; + case 'normdata' + normdata = optvalue; + + end +end + + +%% CONTINUE THE PLOT WITH UPDATED OPTIONS: + +% define a circle + th = 0:pi/150:2*pi; + xunit = cos(th); + yunit = sin(th); +% now really force points on x/y axes to lie on them exactly + inds = 1:(length(th)-1)/4:length(th); + xunit(inds(2:2:4)) = zeros(2,1); + yunit(inds(1:2:5)) = zeros(3,1); +% plot background if necessary + if ~ischar(get(cax,'color')), +% ig = find(th>=0 & th<=pi); + ig = 1:length(th); + patch('xdata',xunit(ig)*rmax,'ydata',yunit(ig)*rmax, ... + 'edgecolor',tc,'facecolor',get(cax,'color'),... + 'handlevisibility','off','parent',cax); + end + +% DRAW RMS CIRCLES: + % ANGLE OF THE TICK LABELS + c82 = cos(tickRMSangle*pi/180); + s82 = sin(tickRMSangle*pi/180); + for ic = 1 : length(tickRMS) + i = tickRMS(ic); + iphic = find( sqrt(dx^2+rmax^2-2*dx*rmax*xunit) >= i ,1); + ig = find(i*cos(th)+dx <= rmax*cos(th(iphic))); + hhh = line(xunit(ig)*i+dx,yunit(ig)*i,'linestyle',styleRMS,'color',[.6 .6 .6],'linewidth',widthRMS,... + 'handlevisibility','off','parent',cax); + if showlabelsRMS + text((i+rincRMS/20)*c82+dx,(i+rincRMS/20)*s82, ... + [' ' num2str(i)],'verticalalignment','bottom',... + 'handlevisibility','off','parent',cax,'color',colRMS,'rotation',tickRMSangle-90, 'fontsize', 18) + end + end + +% DRAW DIFFERENTLY THE CIRCLE CORRESPONDING TO THE OBSERVED VALUE +% hhh = line((cos(th)*dx),sin(th)*dx,'linestyle','--','color',colSTD,'linewidth',1,... +% 'handlevisibility','off','parent',cax); + + +% DRAW STD CIRCLES: + % draw radial circles + for ic = 1 : length(tickSTD) + i = tickSTD(ic); + hhh = line(xunit*i,yunit*i,'linestyle',styleSTD,'color',[.6 .6 .6],'linewidth',widthSTD,... + 'handlevisibility','off','parent',cax); + if showlabelsSTD + if Npan == 2 + if length(find(tickSTD==0)) == 0 + text(0,-rinc/20,'0','verticalalignment','top','horizontalAlignment','center',... + 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18); + end + text(i,-rinc/20, ... + num2str(i),'verticalalignment','top','horizontalAlignment','center',... + 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18) + else + if length(find(tickSTD==0)) == 0 + text(-rinc/20,rinc/20,'0','verticalalignment','middle','horizontalAlignment','right',... + 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18); + end + text(-rinc/20,i, ... + num2str(i),'verticalalignment','middle','horizontalAlignment','right',... 
+ 'handlevisibility','off','parent',cax,'color',colSTD, 'fontsize', 18) + end + end + end + + set(hhh,'linestyle','-', 'color', 'k') % Make outer circle solid + +% DRAW CORRELATIONS LINES EMANATING FROM THE ORIGIN: + corr = tickCOR(Npan).val; + th = acos(corr); + cst = cos(th); snt = sin(th); + cs = [-cst; cst]; + sn = [-snt; snt]; + line(rmax*cs,rmax*sn,'linestyle',styleCOR,'color',[.6 .6 .6],'linewidth',widthCOR,... + 'handlevisibility','off','parent',cax) + + % annotate them in correlation coef + if showlabelsCOR + rt = 1.05*rmax; + for i = 1:length(corr) + text(rt*cst(i),rt*snt(i),num2str(corr(i)),... + 'horizontalalignment','center',... + 'handlevisibility','off','parent',cax,'color',colCOR, 'fontsize', 18); + if i == length(corr) + loc = int2str(0); + loc = '1'; + else + loc = int2str(180+i*30); + loc = '-1'; + end + end + end + +% AXIS TITLES + axlabweight = 'bold'; + ix = 0; + if Npan == 1 + if titleSTD + ix = ix + 1; + ax(ix).handle = ylabel(' ','color',colSTD,'fontweight',axlabweight, 'fontsize', 1, 'Position', ... + [-0.3 1.1 1]); +% ax(ix).handle = ylabel(' ','color',colSTD,'fontweight',axlabweight, 'fontsize', 18); + end + + if titleCOR + ix = ix + 1; + clear ttt + pos1 = 45; DA = 15; + lab = 'Correlation'; + c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); + dd = 1.1*rmax; ii = 0; + for ic = 1 : length(c) + ith = c(ic); + ii = ii + 1; + ttt(ii)=text(dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); + set(ttt(ii),'rotation',ith-90,'color',colCOR,'horizontalalignment','center',... + 'verticalalignment','bottom','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight, 'fontsize', 18); + end + ax(ix).handle = ttt; + end + +% if titleRMS +% ix = ix + 1; +% clear ttt +% pos1 = tickRMSangle+(180-tickRMSangle)/2; DA = 15; pos1 = 160; +% lab = 'RMSD'; +% c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); +% dd = 1.05*tickRMS(1); +% dd = .95*tickRMS(2); +% ii = 0; +% for ic = 1 : length(c) +% ith = c(ic); +% ii = ii + 1; +% ttt(ii)=text(dx+dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); +% set(ttt(ii),'rotation',ith-90,'color',colRMS,'horizontalalignment','center',... +% 'verticalalignment','top','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight, 'fontsize', 14); +% end +% ax(ix).handle = ttt; +% end + + + else + if titleSTD + ix = ix + 1; + ax(ix).handle =ylabel('Standard deviation','fontweight',axlabweight,'color',colSTD); + end + + if titleCOR + ix = ix + 1; + clear ttt + pos1 = 90; DA = 15; + lab = 'Correlation'; + c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); + dd = 1.1*rmax; ii = 0; + for ic = 1 : length(c) + ith = c(ic); + ii = ii + 1; + ttt(ii)=text(dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); + set(ttt(ii),'rotation',ith-90,'color',colCOR,'horizontalalignment','center',... + 'verticalalignment','bottom','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight); + end + ax(ix).handle = ttt; + end + +% if titleRMS +% ix = ix + 1; +% clear ttt +% pos1 = 160; DA = 10; +% lab = 'RMSD'; +% c = fliplr(linspace(pos1-DA,pos1+DA,length(lab))); +% dd = 1.05*tickRMS(1); ii = 0; +% for ic = 1 : length(c) +% ith = c(ic); +% ii = ii + 1; +% ttt(ii)=text(dx+dd*cos(ith*pi/180),dd*sin(ith*pi/180),lab(ii)); +% set(ttt(ii),'rotation',ith-90,'color',colRMS,'horizontalalignment','center',... 
+% 'verticalalignment','bottom','fontsize',get(ax(1).handle,'fontsize'),'fontweight',axlabweight); +% end +% ax(ix).handle = ttt; +% end + end + + +% VARIOUS ADJUSTMENTS TO THE PLOT: + set(cax,'dataaspectratio',[1 1 1]), axis(cax,'off'); set(cax,'NextPlot',next); + set(get(cax,'xlabel'),'visible','on') + set(get(cax,'ylabel'),'visible','on') + % makemcode('RegisterHandle',cax,'IgnoreHandle',q,'FunctionName','polar'); + % set view to 2-D + view(cax,2); + % set axis limits + if Npan == 2 + axis(cax,rmax*[-1.15 1.15 0 1.15]); + line([-rmax rmax],[0 0],'color',tc,'linewidth',1.2); + line([0 0],[0 rmax],'color',tc); + else + axis(cax,rmax*[0 1.15 0 1.15]); +% axis(cax,rmax*[-1 1 -1.15 1.15]); + line([0 rmax],[0 0],'color',tc,'linewidth',1.2); + line([0 0],[0 rmax],'color',tc,'linewidth',2); + end + +% clr{1} = 'k'; +% clr{2} = 'c'; +% clr{3} = 'm'; +% clr{4} = 'y'; +% clr{5} = 'b'; +% clr{6} = 'r'; +% clr{7} = 'g'; +% FINALY PLOT THE POINTS: +% Edited 27.01.2011 by Christof Lorenz +% Function was edited to allow a timeseries of datasets + hold on + ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'; + for jj = 1 : size(STDs,1) + for ii = 1 : size(STDs,2) +% pp(ii)=polar(theta(ii),rho(ii)); + pp(jj,ii)=plot(rho(jj,ii)*cos(theta(jj,ii)),rho(jj,ii)*sin(theta(jj,ii)), 'linestyle', 'none'); + set(pp(jj,ii),'marker',markertype{ii},'markersize', pointsize); + set(pp(jj,ii),'color',pointclr{ii}); + if length(STDs)<=26 && labelDTA == 1 + X_lbl = rho(jj,ii)*cos(theta(jj,ii)); + Y_lbl = rho(jj,ii)*sin(theta(jj,ii)); + tt(jj,ii)=text(X_lbl,Y_lbl,ALPHABET(ii),'color',pointclr{ii}); + elseif length(STDs)<=26*2 && labelDTA == 1 + X_lbl = rho(jj,ii)*cos(theta(jj,ii)); + Y_lbl = rho(jj,ii)*sin(theta(jj,ii)); + tt(jj,ii)=text(X_lbl,Y_lbl,lower(ALPHABET(ii)),'color',pointclr{ii}); + elseif length(STDs)>26*2 && labelDTA == 1 + error('sorry I don''t how to handle more than 52 points labels !'); + else + tt = []; + end + end + end + if labelDTA == 1 + set(tt,'verticalalignment','bottom','horizontalalignment','right') + set(tt,'fontsize',20) + end + + +%%% OUTPUT + + +switch nargout + case 1 + varargout(1) = {pp}; + case 2 + varargout(1) = {pp}; + varargout(2) = {tt}; + case 3 + varargout(1) = {pp}; + varargout(2) = {tt}; + varargout(3) = {ax}; +end + + + +end%function + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +function varargout = disp_optionslist(varargin) + +disp('General options:') +dispopt('''Npan''',sprintf('1 or 2: Panels to display (1 for positive correlations, 2 for positive and negative correlations).\n\t\tDefault value depends on CORs')); + +disp('RMS axis options:') +dispopt('''tickRMS''','RMS values to plot gridding circles from observation point'); +dispopt('''colRMS''','RMS grid and tick labels color. Default: green'); +dispopt('''showlabelsRMS''','0 / 1 (default): Show or not the RMS tick labels'); +dispopt('''tickRMSangle''','Angle for RMS tick lables with the observation point. Default: 135 deg.'); +dispopt('''styleRMS''','Linestyle of the RMS grid'); +dispopt('''widthRMS''','Line width of the RMS grid'); +dispopt('''titleRMS''','0 / 1 (default): Show RMSD axis title'); + +disp('STD axis options:') +dispopt('''tickSTD''','STD values to plot gridding circles from origin'); +dispopt('''colSTD''','STD grid and tick labels color. 
Default: black'); +dispopt('''showlabelsSTD''','0 / 1 (default): Show or not the STD tick labels'); +dispopt('''styleSTD''','Linestyle of the STD grid'); +dispopt('''widthSTD''','Line width of the STD grid'); +dispopt('''titleSTD''','0 / 1 (default): Show STD axis title'); +dispopt('''limSTD''','Max of the STD axis (radius of the largest circle)'); + +disp('CORRELATION axis options:') +dispopt('''tickCOR''','CORRELATON grid values'); +dispopt('''colCOR''','CORRELATION grid color. Default: blue'); +dispopt('''showlabelsCOR''','0 / 1 (default): Show or not the CORRELATION tick labels'); +dispopt('''styleCOR''','Linestyle of the COR grid'); +dispopt('''widthCOR''','Line width of the COR grid'); +dispopt('''titleCOR''','0 / 1 (default): Show CORRELATION axis title'); + +disp('New functions (added 27.01.2011 by Christof Lorenz)') +dispopt('''labelDTA''','0 / 1 (default): Show data point labels'); +dispopt('''pointclr''','Color of the data points; can be different for each dataset -> cell array. Default: red'); +dispopt('''markertype''','Marker type of the data points; can be different for each dataset -> cell array. Default: Point'); +disp('For pointclr see color specifiers and for markertype see marker specifiers') +dispopt('''pointsize''', 'Size of the data points. Default: 20') +end%function + +function [] = dispopt(optname,optval) + disp(sprintf('\t%s',optname)); + disp(sprintf('\t\t%s',optval)); +end \ No newline at end of file diff --git a/taylordiag_test.m b/taylordiag_test.m new file mode 100644 index 0000000..4c41886 --- /dev/null +++ b/taylordiag_test.m @@ -0,0 +1,98 @@ +% This is a test to use the taylordiag.m ploting function. +% +% The data file taylordiag_egdata.mat is required together +% with the function "allstats" and "ptable". +% Both are available at: http://code.google.com/p/guillaumemaze/ +% +% +% This function runs the following command lines: +% +% clear +% load taylordiag_egdata.mat +% +% % Get statistics from time series: +% for ii = 2:size(BUOY,1) +% C = allstats(BUOY(1,:),BUOY(ii,:)); +% statm(ii,:) = C(:,2); +% end +% statm(1,:) = C(:,1); +% +% % Plot: +% figure +% ax = ptable([2 3],[2 2;4 6]); +% iw=2;jw=3; +% alphab = 'ABCDEFG'; +% +% subplot(iw,jw,2); +% plot(BUOY'); +% grid on,xlabel('time (day)');ylabel('heat fluxes (W/m^2)'); +% title(sprintf('%s: These are the different time series of daily heat fluxes (W/m2)','A'),'fontweight','bold'); +% +% subplot(iw,jw,5); hold on +% [pp tt axl] = taylordiag(squeeze(statm(:,2)),squeeze(statm(:,3)),squeeze(statm(:,4)),... +% 'tickRMS',[25:25:150],'titleRMS',0,'tickRMSangle',135,'showlabelsRMS',0,'widthRMS',1,... +% 'tickSTD',[25:25:250],'limSTD',250,... 
+% 'tickCOR',[.1:.1:.9 .95 .99],'showlabelsCOR',1,'titleCOR',1); +% +% for ii = 1 : length(tt) +% set(tt(ii),'fontsize',9,'fontweight','bold') +% set(pp(ii),'markersize',12) +% if ii == 1 +% set(tt(ii),'String','Buoy'); +% else +% set(tt(ii),'String',alphab(ii-1)); +% end +% end +% title(sprintf('%s: Taylor Diagram at CLIMODE Buoy','B'),'fontweight','bold'); +% +% tt = axl(2).handle; +% for ii = 1 : length(tt) +% set(tt(ii),'fontsize',10,'fontweight','normal'); +% end +% set(axl(1).handle,'fontweight','normal'); + + +clear +load taylordiag_egdata.mat + +% Get statistics from time series: +for ii = 2:size(BUOY,1) + C = allstats(BUOY(1,:),BUOY(ii,:)); + statm(ii,:) = C(:,2); +end +statm(1,:) = C(:,1); + +% Plot: +figure +ax = ptable([2 3],[2 2;4 6]); +iw=2;jw=3; +alphab = 'ABCDEFG'; + +subplot(iw,jw,2); +plot(BUOY'); +grid on,xlabel('time (day)');ylabel('heat fluxes (W/m^2)'); +title(sprintf('%s: These are the different time series of daily heat fluxes (W/m2)','A'),'fontweight','bold'); + +subplot(iw,jw,5); hold on +[pp tt axl] = taylordiag(squeeze(statm(:,2)),squeeze(statm(:,3)),squeeze(statm(:,4)),... + 'tickRMS',[25:25:150],'titleRMS',0,'tickRMSangle',135,'showlabelsRMS',0,'widthRMS',1,... + 'tickSTD',[25:25:250],'limSTD',250,... + 'tickCOR',[.1:.1:.9 .95 .99],'showlabelsCOR',1,'titleCOR',1); + +for ii = 1 : length(tt) + set(tt(ii),'fontsize',9,'fontweight','bold') + set(pp(ii),'markersize',12) + if ii == 1 + set(tt(ii),'String','Buoy'); + else + set(tt(ii),'String',alphab(ii-1)); + end +end +title(sprintf('%s: Taylor Diagram at CLIMODE Buoy','B'),'fontweight','bold'); + +tt = axl(2).handle; +for ii = 1 : length(tt) + set(tt(ii),'fontsize',10,'fontweight','normal'); +end +set(axl(1).handle,'fontweight','normal'); + diff --git a/testforwhitenoise.m b/testforwhitenoise.m new file mode 100644 index 0000000..874588f --- /dev/null +++ b/testforwhitenoise.m @@ -0,0 +1,40 @@ +function Q_ks = testforwhitenoise(inpt, rema0) +% The function tests a time-series (or a matrix of time-series) for white +% noise. Therefore, the time-series is first transformed to the spectral +% domain. From the fourier-coefficients, the PSD and the CDF are computed. +% With these, the function performs a KS-test. +%-------------------------------------------------------------------------- +if nargin < 2 + rema0 = true; +end + +[a, b] = spec(inpt); +[n, p] = size(a); + +if rema0 == true + a = a(2:end, :); + b = b(2:end, :); +end + + +[cn, sn] = emp_cdf(a, b); +keyboard +Q_ks = emp_ks_tst(sn); + + +scrsz = get(0,'ScreenSize'); +figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/4 scrsz(4)/4]) +title('KS Test for white noise') +plot(Q_ks(1,:), 'o', 'MarkerEdgeColor', 'k', ... + 'MarkerFaceColor', 'b', ... 
+ 'MarkerSize', 8); +ytick{1} = 'H_0 Rejected'; +ytick{2} = 'H_0 Accepted'; + +set(gca, 'ytick', [0 1]); +set(gca, 'yticklabel', ytick); +xlabel('# of time-series') +axis([1 p 0 1]) +pbaspect([size(a,2)/2 2 1]) + + diff --git a/trajectory_kalman1.m b/trajectory_kalman1.m new file mode 100644 index 0000000..cd31e9b --- /dev/null +++ b/trajectory_kalman1.m @@ -0,0 +1,115 @@ +% Ausgleichungsproblem: Schiefer Wurf +% Beispiel eines Kalman Filters +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen, 2011 +% ------------------------------------------------------------------------- +function [xht, L, P] = trajectory_kalman1(L, R, Q, x0, P0); + +clc + +% Parameter der Trajektorie +g = 9.81; % Erdanziehung +x(1) = 0; % Startwert für x-Richtung +y(1) = 0; % Startwert für y-Richtung +vy(1) = 10; % Startgeschw. in x-Richtung +vx = 10; % Startgeschw. in y-Richtung +dt = 0.05; % Zeitintervall + + +% Berechnung der Referenztrajektorie +for i = 1:40 +x(i+1,1) = x(i) + vx*dt; +vy(i+1,1) = vy(i) - g*dt; +y(i+1,1) = y(i) + vy(i)*dt - 1/2*g*dt^2; +end + +% Zum Vergleich: Analytische Bestimmung der Kurve +y_ana = y(1) + vy(1)/vx*x - (g/(2*vx^2))*x.^2; + +% Bestimmung von Pseudo-Beobachtungen u. der dazugehörigen Fehler +if nargin < 1 + L = y + randn(length(y),1)/2; +end + +if nargin < 2 + R = 0.01; % Hier: Fehler konstant für alle Messungen +end + +A = [1 dt; 0 1]; % Time-relation matrix +B = [0.5*dt; 1]; % Control-input matrix +H = [1 0]; % Observation-relation matrix +u = -g*dt; + +% Anfangswerte +if nargin < 4 + x0 = [0; 5]; +end + +% Process noise (wird hier als gering angenommen) +if nargin < 3 + Q = 1e-5; +end +% Process covariance (Anfangswerte gleich genau und nicht korreliert) +if nargin < 5 + P0 = eye(2,2); +end +% Intitialisierung des Lösungsverktors +xht = zeros(2,1); + +for i = 1:length(L) + + % ---------------------- + % Predictor step + % ---------------------- + if i == 1 + x_pred = A*x0 + B*u; % State prediction + P_pred = A*P0*A' + Q; % Covariance prediction + else + x_pred = A*xht(:,i-1) + B*u; % State prediction + P_pred = A*P*A' + Q; % Covariance prediction + end + x_p(:,i) = x_pred; % Saving the prediciton + + % ---------------------- + % Corrector step + % ---------------------- + v = L(i) - H*x_pred; % Innovation + + if length(R) > 1 % Innovation covariance + S = H*P_pred*H' + R(i); % constant obs. error + else % Innovation covariance + S = H*P_pred*H' + R; % changing obs. error + end + + K = P_pred*H'*inv(S); % Kalman Gain + xht(1:2,i) = x_pred + K*v; % Update state + P = ([1 0; 0 1] - K*H)*P_pred; % Update covariance + +end + + + + + +keyboard + + + + + + + + + + + + + + + + + + + + + diff --git a/trajectory_kalman2.m b/trajectory_kalman2.m new file mode 100644 index 0000000..1e7a35e --- /dev/null +++ b/trajectory_kalman2.m @@ -0,0 +1,117 @@ +% Ausgleichungsproblem: Schiefer Wurf +% Beispiel einer kleinste-Quadrate-Ausgleichung mit nicht-linearen +% Beobachtungsgleichungen über extended Kalman Filter +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen, 2011 +% ------------------------------------------------------------------------- + +function [xht, L, P] = trajectory_kalman2(L, R, Q, x0, P0) + +% Parameter der Trajektorie +g = 9.81; % Erdanziehung +x(1) = 0; % Startwert für x-Richtung +y(1) = 0; % Startwert für y-Richtung +vy(1) = 10; % Startgeschw. in x-Richtung +vx = 10; % Startgeschw. 
in y-Richtung +dt = 0.05; % Zeitintervall + + +% Berechnung der Referenztrajektorie über Bewegungsgleichung +for i = 1:40 +x(i+1,1) = x(i) + vx*dt; % X-Komponente der Pos. +vy(i+1,1) = vy(i) - g*dt; % Geschwindigkeit in y +y(i+1,1) = y(i) + vy(i)*dt - 1/2*g*dt^2; % Y-Komponente der Pos. +end + +% Zum Vergleich: Analytische Bestimmung der Kurve +y_ana = y(1) + vy(1)/vx*x - (g/(2*vx^2))*x.^2; + +% Bestimmung von Pseudo-Beobachtungen (hier: alle Beob. gleich genau, keine +% Korrelationen) +if nargin < 1 + L = y + randn(length(y),1)/2; +end + +% Measurement noise +if nargin < 2 + R = 1e-2; +end + +% Vektor mit Näherungswerten für die Unbekannten +if nargin < 4 + x0 = [0; 9; 11]; +end +xht = x0; + + +% Kovarianzmatrix der Unbekannten (alle Näherungswerte gleich genau, keine +% Korrelationen) +if nargin < 5 + P0 = eye(3,3); +end + +% State relation matrix (-> hier Diagonalmatrix) +A = eye(3,3); + +% Kein Kontrollinput +B = zeros(3,1); +u = 0; + +% Process noise +if nargin < 3 + Q = 1e-5; +end + +for i = 1:length(L) + + % ---------------------- + % Predictor step + % ---------------------- + if i == 1 + x_pred = A*x0 + B*u; % State prediction + P_pred = A*P0*A' + Q; % Covariance prediction + else + x_pred = A*xht(:,i-1) + B*u; % State prediction + P_pred = A*P*A' + Q; % Covariance prediction + end + + + % ---------------------- + % Corrector step + % ---------------------- + % Aufstellen der H-matrix mit h_ij = (dfi/dXi)|x_0 + H(1,1) = 1; % Ableitung nach y_0 + H(1,2) = x(i)/x_pred(2); % Ableitung nach v_0y + H(1,3) = -x_pred(2)*x(i)/(x_pred(3)^2) ... + + g*x(i)^2/(x_pred(3)^3); % Ableitung nach v_0x + + % Berechnung von h(x,0) an der Stelle x_pred (-> NICHT-linear) + hx = x_pred(1) + x_pred(2)*x(i)/x_pred(3) - g*x(i)^2/(2*x_pred(3)^2); + + % Berechnung der Innovation + v = L(i) - hx; + + % Berechnung der Innovations-Kovarianz + if length(R) > 1 + S = H*P_pred*H' + R(i); + else + S = H*P_pred*H' + R; + end + + % Berechnung d. Kalman-Gains + K = P_pred*H'*inv(S); + + % Korrektor d. state u. covariance + xht(:,i) = x_pred + K*v; % Update state + P = (eye(3,3) - K*H)*P_pred; % Update covariance +end + +% L_a = xht(1,1) + xht(2,1)*x/xht(3,1) - g*x.^2/(2*xht(3,1)^2); +% L_b = xht(1,10) + xht(2,10)*x/xht(3,10) - g*x.^2/(2*xht(3,10)^2); +% L_c = xht(1,end) + xht(2,end)*x/xht(3,end) - g*x.^2/(2*xht(3,end)^2); + +keyboard + + + + diff --git a/trajectory_ls.m b/trajectory_ls.m new file mode 100644 index 0000000..45a2a7b --- /dev/null +++ b/trajectory_ls.m @@ -0,0 +1,112 @@ +% Ausgleichungsproblem: Schiefer Wurf +% Beispiel einer kleinste-Quadrate-Ausgleichung mit nicht-linearen +% Beobachtungsgleichungen +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen, 2011 +% ------------------------------------------------------------------------- +function [xht, Qxx, L, L_0] = trajectory_ls(L, P, x0); + +% Parameter der Trajektorie +g = 9.81; % Erdanziehung +x(1) = 0; % Startwert für x-Richtung +y(1) = 0; % Startwert für y-Richtung +vy(1) = 10; % Startgeschw. in x-Richtung +vx = 10; % Startgeschw. 
in y-Richtung +dt = 0.05; % Zeitintervall + +% -0.3661 +% 10.2422 +% 9.5049 + + +% Berechnung der Referenztrajektorie +for i = 1:40 +x(i+1,1) = x(i) + vx*dt; +vy(i+1,1) = vy(i) - g*dt; +y(i+1,1) = y(i) + vy(i)*dt - 1/2*g*dt^2; +end + +% Zum Vergleich: Analytische Bestimmung der Kurve +y_ana = y(1) + vy(1)/vx*x - (g/(2*vx^2))*x.^2; + +% Bestimmung von Pseudo-Beobachtungen +if nargin < 1 + L = y + randn(length(y),1)/2; +end + +% Kovarianzmatrix der Unbekannten +% 1. Annahme: alle Beobachtungen gleich genau -> Diagonalmatrix +if nargin < 2 + P = eye(length(L), length(L)); +end + +% Vektor mit Näherungswerten für die Unbekannten +if nargin < 3 +x0 = [0; 9; 11]; +end + +xht = x0; + + +% Berechnung des Vektors der Näherungswerte der Messwerte +% ("Näherungsbeobachtungen") +L_approx = xht(1) + xht(2)/xht(3)*x - g/(2*xht(3)^2)*x.^2; +L_0 = L_approx; + +% Berechnung des gekürzten Beobachtungsvektors +l = L - L_0; + +% Festlegung von Schleifenvariablen +crit = 1; +k = 0; + + +while crit == 1 + + k = k + 1; + + % Aufstellen der A-matrix mit a_ij = (dfi/dXi)|x_0 + A(:,1) = ones(size(L,1),1); % Ableitung nach y_0 + A(:,2) = x'./xht(2); % Ableitung nach v_0y + A(:,3) = -xht(2)*x'/xht(3)^2 + g/xht(3)^3*x'.^2; % Ableitung nach v_0x + + % Berechnugn des Vektors der Unbekannten (Nicht-linear -> lediglich + % Zuschläge!!!) + dx = inv(A'*P*A)*A'*P*l; + + % Berechnung der Verbesserungen + v = A*dx - l; + + % Kofaktorenmatrix der ausgegl. Parameter + Qxx = A'*P*A; + + % Berechnung der endgültigen Parameter: Näherunswerte + Zuschläge + xht = xht + dx; + + % Vektor der ausgeglichenen Messwerte (in der nächsten Iteration -> + % Näherungsbeobachtungen) + L_0 = A*dx + L_0; + + % Berechnung des Absolutgliedvektors für die nächste Iteration + l = L - L_0; + + % Ãœberprüfen des Abbruchkriteriums (falls Zuschläge klein genug sind) + if norm(dx) < 1e-15 + crit = 0; + sprintf('Lösung konvergiert nach %d Iterationen.', k) + end + + % Falls Lösungen nicht konvergieren (z.B. schlecht gewählte + % Näherungswerte) -> Schleifenabbruch + if k == 100 + crit = 0; + sprintf('Lösung konvergiert nicht!') + end + +end + + keyboard + + + + diff --git a/trend.m b/trend.m new file mode 100644 index 0000000..8d84fc7 --- /dev/null +++ b/trend.m @@ -0,0 +1,17 @@ +function [xht, yht] = trend(inpt, deg); + + +n = length(inpt); +x = (0:n-1)'; + +A = ones(n,1); + +for i = 1:deg + A = [A x.^i]; +end + + +xht = inv(A'*A)*A'*inpt; +yht = A*xht; + + diff --git a/ts2gmt.m b/ts2gmt.m new file mode 100644 index 0000000..756b033 --- /dev/null +++ b/ts2gmt.m @@ -0,0 +1,66 @@ +function [] = ts2gmt(data, fname, dtatype) +% Conversion from Matlab time-series to ascii-files which can be read and +% plotted by GMT. +%-------------------------------------------------------------------------- +% Input: data [m x n] Matrix which contains one (or several) +% time-series. The first three (four) columns +% must contain (day), month, year and matlab +% timestamp. Data will be read from the fourth +% (fifth) column. 
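+%                       fname       Name of the ascii output file.
+%                       dtatype     'monthly' (default), 'daily' or
+%                                   'mean_monthly'.
+%
+% Usage sketch (hypothetical variable and file names; assumes the monthly
+% column layout described above):
+%   ts2gmt(prec_ts, 'prec_ts.txt', 'monthly')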
+%-------------------------------------------------------------------------- +% Author: Christof Lorenz +% Date: June 2013 +%-------------------------------------------------------------------------- +% Uses: +%-------------------------------------------------------------------------- +% Updates: +%-------------------------------------------------------------------------- + +if nargin < 3, dtatype = 'monthly'; end +if data(1,1) == 0, data = data(2:end, :); end + +if strcmp(dtatype, 'monthly') % Monthly Data + + tvec = data(:, 2)*10000 + data(:, 1)*100 + 15; + out = [tvec data(:, 4:end)]; + + frmt = repmat('%g ', 1, size(data, 2) - 3); + frmt = ['%i ', frmt, ' \n']; + + fid = fopen(fname, 'w'); + fprintf(fid, frmt, out'); + fclose(fid); + +elseif strcmp(dtatype, 'daily') % Daily Data + + tvec = data(:, 3)*10000 + data(:, 2)*100 + data(:, 1); + out = [tvec data(:, 5:end)]; + + frmt = repmat('%g ', 1, size(inpt, 2) - 4); + frmt = ['%i ', frmt, ' \n']; + + fid = fopen(fname, 'w'); + fprintf(fid, frmt, out'); + fclose(fid); + +elseif strcmp(dtatype, 'mean_monthly') + +% tvec = 20000000 + data(:,1)*100 + 15; +% out = [tvec data(:, 2:end)]; + + frmt = repmat('%g ', 1, size(data, 2)-1); + frmt = ['%i ', frmt, ' \n']; + + fid = fopen(fname, 'w'); + fprintf(fid, frmt, data'); + fclose(fid); + +end + + + + + + + + diff --git a/ts2netcdf.m b/ts2netcdf.m new file mode 100644 index 0000000..13c6e85 --- /dev/null +++ b/ts2netcdf.m @@ -0,0 +1,52 @@ +function [] = ts2netcdf(inpt, areanms) +% The function reads data from a cell-array and stores its elements in a +% netcdf-file. Some +fprintf('\n') +fprintf('---------------------------------------------------- \n') +fprintf('Conversion from a MATLAB-cell-array to a netcdf-file \n') +fprintf('---------------------------------------------------- \n') +fprintf(' \n') +outnme = input('Enter output filename: ', 's'); +units = input('Enter units of variable: ', 's'); +longnme = input('Enter variable description: ', 's'); +mval = input('Enter identifier for missing values: ', 's'); +fprintf('---------------------------------------------------- \n') +fprintf('Computing.... 
') + +mnths = inpt(2:end, 1); +yrs = inpt(2:end, 2); +inttme = yrs*10000 + mnths*100 + ones(length(mnths),1)*15; + + + +ncid = netcdf.create(outnme, 'CLOBBER'); +time_dim_id = netcdf.defDim(ncid, 'time', length(inttme)); +time_var_id = netcdf.defVar(ncid, 'time', 'double', time_dim_id); +netcdf.putAtt(ncid, time_var_id, '_CoordinateAxisType', 'Time'); + + +lon_dim_id = netcdf.defDim(ncid, 'longitude', 1); +lat_dim_id = netcdf.defDim(ncid, 'latitude', 1); +lon_var_id = netcdf.defVar(ncid, 'longitude', 'double', lon_dim_id); +lat_var_id = netcdf.defVar(ncid, 'latitude', 'double', lat_dim_id); + + +for i = 1:length(areanms) + data_var_id(i) = netcdf.defVar(ncid, areanms{i}, 'double', [time_dim_id]); + netcdf.putAtt(ncid, data_var_id(i), 'units', units); + netcdf.putAtt(ncid, data_var_id(i), 'long_name', longnme); + netcdf.putAtt(ncid, data_var_id(i), 'missing_value', -99999); +end + +netcdf.endDef(ncid); +netcdf.putVar(ncid, time_var_id, inttme); +netcdf.putVar(ncid, lon_var_id, 0); +netcdf.putVar(ncid, lat_var_id, 0); + +for i = 1:length(areanms) + netcdf.putVar(ncid, data_var_id(i), inpt(2:end, i+3)); +end + +netcdf.close(ncid); + +fprintf('Done \n') diff --git a/tsbias.m b/tsbias.m new file mode 100644 index 0000000..92fbaa8 --- /dev/null +++ b/tsbias.m @@ -0,0 +1,19 @@ +function B = tsbias(obs, sim, type); + +[obs, sim] = find_sim_tspts(obs, sim); +fld1 = obs(2:end, 4:end); +fld2 = sim(2:end, 4:end); + +mask = ones(size(fld1)); + +mask(isnan(fld1)) = 0; +mask(isnan(fld2)) = 0; + +fld1(mask == 0) = 0; +fld2(mask == 0) = 0; + +if strcmp(type, 'rel') + B = (sum(fld1) - sum(fld2))./sum(fld1); +elseif strcmp(type, 'abs') + B = (sum(fld1) - sum(fld2))./sum(mask); +end diff --git a/tseval.m b/tseval.m new file mode 100644 index 0000000..0fdf6b2 --- /dev/null +++ b/tseval.m @@ -0,0 +1,209 @@ +function [] = tseval(ref, varargin) + + +miss = -9999; + +nr_sets = length(varargin); +nr_tsps = length(ref); + +cxis1 = [0 500]; +cxis2 = [-50 50]; + +% Assumption: Reference and the other datasets consist of the same time +% period + +% First/second column: Month/year +syr = ref{1,2}; +eyr = ref{end,2}; + +% 0. Apply a mask to all datasets based on the reference +for i = 1:nr_tsps + for j = 1:nr_sets + varargin{j}{i,3}(ref{i,3} == miss) = miss; + end +end + + +% 1. Compute annual averages, absolute and relative differences +ann_ref = spatmn(ref, [syr eyr], 'annual_1', [1 2 3], -9999, 0); + +for i = 1:nr_sets + ann_evl{i} = spatmn(varargin{i}, [syr eyr], 'annual_1', [1 2 3], -9999, 0); + d_ann{i} = ann_evl{i} - ann_ref; + d_ann_rel{i} = (d_ann{i}./ann_ref)*100; +end + + +% 2. Compute climatological monthly means +mnth_ref = spatmn(ref, [syr eyr], 'monthly_1', [1 2 3], -9999, 0); +for i = 1:nr_sets + mnth_evl(:,i) = spatmn(varargin{i}, [syr eyr], 'monthly_1', [1 2 3], -9999, 0); +end + + +% 3. Compute spatial correlation maps (STILL TO DO!!!) +% for i = 1:nr_tsps +% F_ref(i,:) = ref{i}(:); +% +% for j = 1:nr_sets +% F_evl( + +% 3. 
Compute time-series +load indexfile3.asc +load ctchnms.mat + +catch_ids = cell2mat(ctchnms(1:50, 4)); +catch_areas = cell2mat(ctchnms(1:50, 2)); + + +ref_ts = spataggmn(ref, indexfile3, catch_ids, 'clms', [1 2 3]); +ref_ts_mn = [ref_ts(1, 4:end); mean(ref_ts(2:end, 4:end))]; + +for i = 1:nr_sets + evl_ts{i} = spataggmn(varargin{i}, indexfile3, catch_ids, 'clms', [1 2 3]); + d_ts{i} = evl_ts{i}(2:end, 4:end) - ref_ts(2:end, 4:end); + if i == 1 + evl_ts_mn(1:2,:) = [ref_ts(1, 4:end); mean(evl_ts{i}(2:end, 4:end))]; + else + evl_ts_mn = [evl_ts_mn; mean(evl_ts{i}(2:end, 4:end))]; + end +end + +% 3. Compute scatter plots +for i = 1:nr_sets + [a1(i) b1(i)] = fitline(ref_ts_mn(2,:)', evl_ts_mn(i+1,:)'); +end + + + +if length(varargin) == 2 + for i = 1:nr_sets + tmp = matrixcorr(ref_ts(2:end, 4:end), evl_ts{i}(2:end, 4:end)); + R(:,i) = tmp'; + end + [a2 b2] = fitline(R(:,1), R(:,2)); +end + + + + + + + +% 4. Create some plots +load coast +theta = 89.75:-0.5:-89.75; +lambda = -179.75:0.5:179.75; + + +figure('Name', 'Annual mean') +for i = 1:nr_sets+1 + subplot(1, nr_sets+1,i); + if i == 1 + imagesc(lambda, theta, ann_ref); + title('Reference') + else + imagesc(lambda, theta, ann_evl{i-1}); + tlte = ['Dataset', num2str(i)]; + title(tlte); + end + + axis xy + hold on + plot(long, lat, 'k', 'linewidth', 1.5); + pbaspect([2 1 1]); + caxis(cxis1); +end + +figure('Name', 'Annual mean difference') +for i = 1:nr_sets + subplot(1, nr_sets,i); + imagesc(lambda, theta, d_ann{i}); + axis xy + hold on + plot(long, lat, 'k', 'linewidth', 1.5); + pbaspect([2 1 1]); + tlte = ['Dataset', num2str(i)]; + title(tlte); + caxis(cxis2); +end + +figure('Name', 'Annual mean difference (relative)') +for i = 1:nr_sets + subplot(1, nr_sets,i); + imagesc(lambda, theta, d_ann_rel{i}); + axis xy + hold on + plot(long, lat, 'k', 'linewidth', 1.5); + pbaspect([2 1 1]); + tlte = ['Dataset ', num2str(i)]; + title(tlte); + caxis([-100 100]) +end + +for i = 1:nr_sets + fname = ['Monthly difference, dataset ', num2str(i)]; + figure('Name', fname); + for j = 1:12 + subplot(3,4,j) + imagesc(lambda, theta, mnth_evl{j,i} - mnth_ref{j}); + axis xy + hold on + plot(long, lat, 'k', 'linewidth', 1.5); + pbaspect([2 1 1]); + caxis(cxis2); + end +end + + +x = 0:ceil(max(ref_ts_mn(2,:))); +axs1 = min(min([ref_ts_mn(2,:); evl_ts_mn(2:end,:)])); +axs2 = max(max([ref_ts_mn(2,:); evl_ts_mn(2:end,:)])); +for i = 1:nr_sets + figure + scatter(ref_ts_mn(2,:), evl_ts_mn(i+1,:), 'filled'); + hold on + plot(x, a1(i)*x+b1(i)); + xlabel('Reference') + ytlte = ['Dataset ', num2str(i)]; + ylabel(ytlte); + axis([axs1 axs2 axs1 axs2]); +end + +if length(varargin) == 2 + x = 0:0.1:1; + axs1 = min(min([R(1,:) R(2,:)])); + axs2 = max(max([R(1,:) R(2,:)])); + figure + scatter(R(:,1), R(:,2), 'filled'); + hold on + plot(x, a2*x+b2) + xlabel('Correlation Reference vs. Dataset 1') + ylabel('Correlation Reference vs. 
Dataset 2'); + axis([0 1 0 1]); +end + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tsinterp.m b/tsinterp.m new file mode 100644 index 0000000..4e865b7 --- /dev/null +++ b/tsinterp.m @@ -0,0 +1,34 @@ +function tsb = tsinterp(inpt, clms, interpflg); + +mn = inpt(:, clms(1)); +yr = inpt(:, clms(2)); +ts = inpt(:, clms(3)); + +if strcmp(interpflg, 'monmean') + pstn = find(isnan(ts)); + tsb = ts; + for i = 1:pstn + mn_indx = find(mn(pstn(i)) == mn); + flg = isfinite(ts(mn_indx)); + mn_vals = ts(mn_indx(flg ~= 0)); + tsb(pstn(i)) = mean(mn_vals); + end + + +elseif strcmp(interpflg, 'interpol') + + pstn = find(~isnan(ts)); + tsn = ts(~isnan(ts)); + + tsb(:,1) = interp1(pstn, tsn, 1:length(ts)); +end + +srn = datenum(yr, mn, ones(length(mn),1)*15); + +tsb = [mn yr srn tsb]; + + + + + + \ No newline at end of file diff --git a/tskalman.m b/tskalman.m new file mode 100644 index 0000000..93cd307 --- /dev/null +++ b/tskalman.m @@ -0,0 +1,390 @@ +clear all + + + +load indexfile3.asc + +% ------------------------------------------------------------------------- +% Compute the temporal covariance from a model +% ------------------------------------------------------------------------- +% Note: This is still "under construction"..... +% ------------------------------------------------------------------------- +load /media/storage/Data/Precipitation/MERRA_LND/MERRA_LND_PREC_360x720.mat +load /media/storage/Data/Evaporation/MERRA_LND/MERRA_LND_ET_360x720.mat +load /media/storage/Data/Runoff/MERRA_LAND/MERRA_LND_R_360x720.mat + +mask = zeros(360, 720); +mask(indexfile3 == 193) = 1; + +mnths = cell2mat(merra_lnd_prec(:, 3)); +yr = cell2mat(merra_lnd_prec(:, 4)); + +% Compute P - E - R from MERRA-land +for i = 1:384 + PER{i,1} = merra_lnd_prec{i,8} - merra_lnd_et{i,8} - merra_lnd_r{i,8}; + PER{i,1}(isnan(PER{i,1})) = -9999; +end +clear merra* + +[F_catch, c_indx] = cell2catchmat(PER, mask); + +% Remove the mean and the trend from the catchment matrix +F_catch = detrend(F_catch); +F_catch = F_catch - ones(384,1)*mean(F_catch, 1); + +for i = 1:12 + indx = find(mnths == i); + mnth_mn = mean(F_catch(indx, :), 1); + F_catch(indx, :) = F_catch(indx, :) - ones(32,1)*mnth_mn; +end + + + + +% Until this step, the computation is (hopefully) correct + +% The "innovative" part: +% Compute the temporal covariance +covswitch = 3; +if covswitch == 1 + mdl_cov = 1/384*F_catch'*F_catch; % Temporal autocorrelation +elseif covswitch == 2 + for i = 1:84 + mdl_cov{i,1} = F_catch(:,i)'*F_catch(:,i); + end +elseif covswitch == 3 + % The Kurtenbach approach + cov1 = 1/384*(F_catch'*F_catch); + cov2 = 1/383*(F_catch(2:end,:)'*F_catch(1:end-1,:)); + + % As cov1 is highly undertermined (rank cov1 = 372), a regularization + % factor is applied + cov1 = cov1 + eye(1529); + + mdl_cov = cov1 - cov2*inv(cov1)*cov2'; + B = cov2*inv(cov1); +end + +% Cleaning up.... +clear F_catch PER i indx mnth_mn mnth yr + + + +% ------------------------------------------------------------------------- +% Compute the "observed" time-series +% ------------------------------------------------------------------------- +load /media/storage/Data/Runoff/GRDC/GRDC_R.mat +load /media/storage/Data/Mflux/MERRA/MERRA_VIMFD.mat + +R = grdc_r(278:361,4); + + +vimfd = spataggmn(merra_vimfd, indexfile3, 193, 'clms', [4 5 9], ... + 'theta', (89.75:-0.5:-89.75)'); + +% Check for the correct times... +grdc_r(278,1:2) +grdc_r(361,1:2) +vimfd(290,1:2) +vimfd(373,1:2) + +vimfd = vimfd(290:373,4); + +dSdt = -vimfd-R; + +keyboard +% Cleaning up... 
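+% Note: dSdt approximates the catchment storage change from the atmospheric
+% water budget, dS/dt = -VIMFD - R (negative vertically integrated moisture
+% flux divergence minus observed runoff). Further below it is stacked with
+% the GRACE-derived dM/dt as the observation vector for the Kalman filter.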
+clear grdc_r merra* vimfd i R + + + + +% ------------------------------------------------------------------------- +% Prepare the GRACE time-series +% ------------------------------------------------------------------------- +load /media/storage/Data/GRACE/casmmass.mat +grace = [casmmass(7:12,5); zeros(360, 720); casmmass(13:91,5)]; + +% Interpolate to fill the gap in June 2003... +grace{7,1} = 1/2*(grace{6,1}+grace{8,1}); + +% Compute the derivatives through central differences +for i = 2:length(grace)-1 + dMdt{i-1,1} = (grace{i+1} - grace{i-1})/2; + dMdt{i-1,1} = [dMdt{i-1,1}(:, 361:end) dMdt{i-1,1}(:, 1:360)]; +end + +dmdt_catch = cell2catchmat(dMdt, mask); +dmdt_catch = dmdt_catch'; + +% keyboard +% Cleaning up... +clear grace casmmass i + + +% ------------------------------------------------------------------------- +% Set up the variables for the Kalman Filter +% ------------------------------------------------------------------------- + +% Initial state vector +x_0 = dmdt_catch(:,1); + +% Process noise +Q = cov2/1000; + +% Observation noise +R = [1; ones(length(x_0),1)/400]; + +% Compute the observation relation matrix +A = area_wghts((89.75:-0.5:-89.75)', 0.5, 'mat', 'haversine'); +A_vec = A(:); +mask_vec = mask(:); + +catch_vec = A_vec(c_indx).*mask_vec(c_indx); +catch_vec = catch_vec/sum(catch_vec); + +H = [catch_vec'; eye(length(x_0))]; + +% Compute the observation vectors +y = [dSdt'; dmdt_catch]; + +% Cleaning up... +clear A A_vec mask_vec catch_vec + + +% ------------------------------------------------------------------------- +% Start filtering........ +% ------------------------------------------------------------------------- +for i = 1:20 + % Predictor step + if i == 1 + x_p(:,i) = x_0; + if covswitch == 1 | covswitch == 3 + P_p = Q; + elseif covswitch == 2 + P_p = Q{1}; + end + else + + if covswitch == 1 + x_p(:,i) = x_k(:,i-1); + P_p = P_k{i-1} + Q; + + elseif covswitch == 2 + x_p(:,i) = x_k(:,i-1); + P_p = P_k{i-1} + Q{i}; + elseif covswitch == 3 + x_p(:,i) = B*x_k(:,i-1); + P_p = B*P_k{i-1}*B' + Q; + end + end + + % Corrector step + v_k = y(:,i) - H*x_p(:,i); + S_k = H*P_p*H' + diag(R); + K_k = P_p*H'*inv(S_k); + + x_k(:,i) = x_p(:,i) + K_k*v_k; + P_k{i} = P_p - K_k*S_k*K_k'; + + % Cleaning up.... + clear v_k S_k K_k P_p +end +clear i + + + + +% ------------------------------------------------------------------------- +% Doing the ugly analysis part... +% ------------------------------------------------------------------------- +F_out = catchmat2cell(x_k', c_indx, 360, 720); +F_in = catchmat2cell(dmdt_catch', c_indx, 360, 720); + +for i = 1:84 + F_out{i,2} = F_out{i,1}(165:225, 200:250); + F_in{i,2} = F_in{i,1}(165:225, 200:250); +end + + +% Create some nice plots.. 
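+% (The map plots below use worldmap/geoshow from the Mapping Toolbox and the
+% built-in "coast" dataset for the coastlines; set plotswitch = 0 to skip them.)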
+plotswitch = 1; + +if plotswitch == 1 + theta = 89.75:-0.5:-89.75; + lambda = -179.75:0.5:179.75; + load coast + + for i = 1:12 + subplot(4,3,i) + h = worldmap(F_out{i,2}, [2 7.75 -80.25]); + geoshow(flipud(F_out{i,2}), [2, 7.75, -80.25], 'DisplayType', 'texturemap') + geoshow(lat, long) + caxis([-250 250]) + end + + for i = 13:24 + subplot(4,3,i-12) + h = worldmap(F_out{i,2}, [2 7.75 -80.25]); + geoshow(flipud(F_out{i,2}), [2, 7.75, -80.25], 'DisplayType', 'texturemap') + geoshow(lat, long) + caxis([-250 250]) + end + + for i = 25:36 + subplot(4,3,i-24) + h = worldmap(F_out{i,2}, [2 7.75 -80.25]); + geoshow(flipud(F_out{i,2}), [2, 7.75, -80.25], 'DisplayType', 'texturemap') + geoshow(lat, long) + caxis([-250 250]) + end +end + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +% function [] = tskalman(modlts, obsts) + +% % Remove the trend and the mean from the TS +% modlts_c = modlts - mean(modlts); +% obsts_c = obsts - mean(obsts); +% +% +% +% % modlts_c = detrend(modlts_c); +% % obsts_c = detrend(obsts_c); +% +% n = length(modlts); +% +% for i = 2:n +% A(i-1,1) = modlts_c(i)/modlts_c(i-1); +% end +% +% sig_r = 5; +% x_0 = 0; +% +% R = 5; +% Q = 5; +% +% for i = 1:n +% % Predictor step +% if i == 1 +% x_p(1) = x_0; +% P_p(1) = R; +% else +% x_p(i) = A(i-1)*x_k(i-1); +% P_p(i) = A(i-1)*P_k(i-1)*A(i-1) + R; +% end +% +% % Corrector step +% y_k = obsts_c(i) - x_p(i); +% S_k = P_p(i) + Q; +% K_k = P_p(i)/S_k; +% +% x_k(i) = x_p(i) + K_k*y_k; +% P_k(i) = P_p(i) - K_k*S_k*K_k; +% end +% keyboard +% +% +% % [rsdl, estts, mn, trnd, yrcle] = centerts(modlts_c); +% % +% % +% % xht = [yrcle]; +% % keyboard +% % sig_r = 5; +% % x_0 = 0; +% % +% % B = [repmat(eye(12), [n/12, 1])]; +% % +% % keyboard +% % for i = 1:n +% % % Predictor step +% % if i == 1 +% % x_p(1) = x_0 + B(i,:)*xht + rsdl(i); +% % P_p(1) = rsdl(1); +% % else +% % x_p(i) = x_k(i-1) + B(i,:)*xht + rsdl(i); +% % P_p(i) = P_k(i-1) + rsdl(i); +% % end +% % +% % % Corrector step +% % y_k = obsts_c(i) - x_p(i); +% % S_k = P_p(i) + sig_r; +% % K_k = P_p(i)/S_k; +% % +% % x_k(i) = x_p(i) + K_k*y_k; +% % P_k(i) = P_p(i) - K_k*S_k*K_k; +% % end +% % +% % keyboard +% +% +% +% diff --git a/tskinvsq.m b/tskinvsq.m new file mode 100644 index 0000000..a8be2d1 --- /dev/null +++ b/tskinvsq.m @@ -0,0 +1,86 @@ +function xhat = tskinvsq(tskin, q, time, period, cswitch, clms_t, clms_q) + + + + +sind_t = find(cell2mat(tskin(:,clms_t(1))) == 1 & ... + cell2mat(tskin(:,clms_t(2))) == time(1)); +eind_t = find(cell2mat(tskin(:,clms_t(1))) == 12 & ... + cell2mat(tskin(:,clms_t(2))) == time(2)); + +sind_q = find(cell2mat(q(:,clms_q(1))) == 1 & ... + cell2mat(q(:,clms_q(2))) == time(1)); +eind_q = find(cell2mat(q(:,clms_q(1))) == 12 & ... 
+ cell2mat(q(:,clms_q(2))) == time(2)); + + +fields = tskin(sind_t:eind_t, clms_t); +fields(:,4) = q(sind_q:eind_q, clms_q(3)); + +clear tskin q + +skintemp = comp_spat_mean(fields, time, period, [1 2 3], -9999, 0); +qtot = comp_spat_mean(fields, time, period, [1 2 4], -9999, 0); + +load continents.asc +mask_t = zeros(size(fields{1,3})); + +if cswitch == 0 + mask_t = mask_t + 1; % All values +elseif cswitch == 1 + mask_t(continents ~= -9999) = 1; % Continental values + mask_t(continents == 4) = 0; +elseif cswitch == 2 + mask_t(continents == -9999) = 1; % Oceanic values + mask_t(continents == 4) = 1; +elseif cswitch == 3 + mask_t(continents ~= -9999) = 1; % Without ice shelf + mask_t(continents == 4) = 0; +elseif cswitch == 4 + mask_t(continents ~= -9999) = 1; % Without polar regions + mask_t(continents == 0) = 0; + mask_t(continents == 4) = 0; +end + +if iscell(skintemp) + for i = 1:length(skintemp) + [tmp1, indx] = sort(skintemp{i}(mask_t ~= 0), 'ascend'); + tmp2 = qtot{i}(mask_t ~= 0); + tmp2 = tmp2(indx); + + A = [ones(length(tmp1),1) log(tmp1)]; + l = log(tmp2); + + xhat(:,i) = inv(A'*A)*A'*l; + tt = 230:5:305; + ln = exp(xhat(1,i))*tt.^xhat(2,i); + loglog(tmp1(1:100:end), tmp2(1:100:end), 'xy'); + hold on + loglog(tt, ln, 'g') + end +else + [tmp1, indx] = sort(skintemp(mask_t ~= 0), 'ascend'); + tmp2 = qtot(mask_t ~= 0); + tmp2 = tmp2(indx); + + A = [ones(length(tmp1),1) log(tmp1)]; + l = log(tmp2); + + xhat = inv(A'*A)*A'*l; + + tt = 230:5:305; + ln = exp(xhat(1))*tt.^xhat(2); + loglog(tmp1, tmp2, 'o'); + hold on + + loglog(tt, ln, 'r') +end + + + + + + + + + \ No newline at end of file diff --git a/tsmean.m b/tsmean.m new file mode 100644 index 0000000..d0feacb --- /dev/null +++ b/tsmean.m @@ -0,0 +1,134 @@ +function otpt = tsmean(inpt, tscale, varargin) + +pp = inputParser; +pp.addRequired('inpt', @(x)ismatrix(x)); % Input dataset (cell) +pp.addRequired('tscale', @isstr); % Time-scale (monthly, annual, ...) + +pp.addParamValue('datatype', 'full'); +pp.addParamValue('period', 0, @isnumeric); % Period for averaging +pp.addParamValue('clms', [1 2 4], @isnumeric); % Columns with m/y/dta +pp.addParamValue('miss', -9999, @(x) ... 
% Missing value (-9999) + (isnumeric(x) | strcmp(x, 'NaN'))); +pp.addParamValue('method', 'mean') +pp.parse(inpt, tscale, varargin{:}) + +period = pp.Results.period; +datatype = pp.Results.datatype; +clms = pp.Results.clms; +miss = pp.Results.miss; +method = pp.Results.method; + +clear pp + +if strcmp(miss, 'NaN') + inpt(isnan(inpt)) = -99999; + miss = -99999; +end + +if period ~= 0 + inpt = findtstps_ts(inpt, [period(1) period(2)]); +end + +if inpt(1, clms(1)) == 0 + indvec = inpt(1, clms(3):end); + inpt = inpt(2:end, :); +end + +if strcmp(tscale, 'annual') + + yrs = unique(inpt(:, clms(2))); + + for i = 1:length(yrs) + indx = find(inpt(:,clms(2)) == yrs(i)); + flds = inpt(indx, clms(3):end); + + if strcmp(method, 'mean') + otpt(i,:) = [yrs(i) nanmean(flds,1)]; + elseif strcmp(method, 'sum') + otpt(i,:) = [yrs(i) sum(flds,1)]; + elseif strcmp(method, 'wmean') + nrd = eomday(inpt(indx, clms(2)), inpt(indx, clms(1))); + flds = flds.*(nrd*ones(1, size(flds,2))); + otpt(i,:) = [yrs(i) sum(flds, 1)./sum(nrd)]; + end + end + +elseif strcmp(tscale, 'ltm') + flds = inpt(:, clms(3):end); + if strcmp(method, 'mean') + otpt = nanmean(flds,1); + elseif strcmp(method, 'sum') + otpt = nansum(flds,1); +% elseif strcmp(method, 'wmean') +% nrd = eomday(inpt(:, clms(2)), inpt(:, clms(1))); +% flds = flds.*(nrd*ones(1, size(flds,2))); +% otpt = nansum(flds, 1)./sum(nrd); + end + + + + +elseif strcmp(tscale, 'monthly') + for i = 1:12 + indx = find(inpt(:,clms(1)) == i); + otpt(i,:) = [i nanmean(inpt(indx, clms(3):end),1)]; + end + +elseif strcmp(tscale, 'seasonal') + + for i = 1:4 + if i == 1 + indx_1 = find(inpt(:, clms(1)) == 12); + indx_2 = find(inpt(:, clms(1)) == 1); + indx_3 = find(inpt(:, clms(1)) == 2); + else + indx_1 = find(inpt(:, clms(1)) == i*3-3); + indx_2 = find(inpt(:, clms(1)) == i*3-2); + indx_3 = find(inpt(:, clms(1)) == i*3-1); + end + otpt(i,:) = [i nanmean(inpt([indx_1; indx_2; indx_3], clms(3):end), 1)]; + clear indx* + end + +elseif strcmp(tscale, 'seasonal_mnthly') + + syr = inpt(1, clms(2)); + eyr = inpt(end, clms(2)); + + dte = dtevec([12 syr-1], [11 eyr]); + dte = dte(2:3:end, :); + + % If inpt does not start in January or does not end in December + inpt = findtstps_ts(inpt, [12 syr-1 11 eyr]); + + bwd = inpt(1:3:end-2, clms(3):end); + cnt = inpt(2:3:end-1, clms(3):end); + fwd = inpt(3:3:end, clms(3):end); + + mask_bwd = zeros(size(bwd)); + mask_cnt = zeros(size(bwd)); + mask_fwd = zeros(size(bwd)); + + mask_bwd(~isnan(bwd)) = 1; + mask_cnt(~isnan(cnt)) = 1; + mask_fwd(~isnan(fwd)) = 1; + + div = mask_bwd + mask_cnt + mask_fwd; + + otpt = (bwd + cnt + fwd)./div; + otpt = [dte, otpt]; + + +end + +if exist('indvec') + if ~strcmp(tscale, 'ltm') + if strcmp(tscale, 'seasonal_mnthly') + otpt = [0 0 0 indvec; otpt]; + else + otpt = [0 indvec; otpt]; + end + else + otpt = [indvec; otpt]; + end +end diff --git a/tsmovav.m b/tsmovav.m new file mode 100644 index 0000000..df961ca --- /dev/null +++ b/tsmovav.m @@ -0,0 +1,14 @@ +function otpt = tsmovav(inpt, wndw) + +% The function computes the moving average of a matrix of which the columns +% contain time-series + +lag = (length(wndw)-1)/2; + +fld = [repmat(inpt(1, :), lag, 1); inpt; repmat(inpt(end, :), lag, 1)]; + +for i = 1:size(inpt, 2) + fld_f(:, i) = conv(fld(:, i), wndw); +end + +otpt = fld_f(lag+2:end-lag-1, :); diff --git a/tsplot.m b/tsplot.m new file mode 100644 index 0000000..770ac9f --- /dev/null +++ b/tsplot.m @@ -0,0 +1,101 @@ +function [f, varargout] = tsplot(nrplots, tscale, datatype, indxs, varargin) + + +if 
strcmp(datatype, 'full') + sind = 2; + if tscale == [1 12] + tmeinfo = 1; + else + tmeinfo = 3; + end + if length(indxs) == 1 + for i = 1:length(varargin) + ccol(i) = find(varargin{i}(1,:) == indxs); + end + else + for i = 1:length(varargin) + for j = 1:length(indxs) + ccol(i,j) = find(varargin{i}(1,:) == indxs(j)); + end + end + end +elseif strcmp(datatype, 'normal') + sind = 1; + tmeinfo = 1; + ccol = ones(length(varargin),1)*2; + tlte = [ ]; +end + +if tscale == [1 12] + mnths = mnthnms('vshort'); +end + +clr = [60 60 60; + 31 120 180; + 51 160 44; + 227 26 28; + 255 127 0; + 106 061 154; + 166 206 227; + 178 223 138; + 251 154 153; + 253 191 111; + 202 178 214]/255; +% +% clr = [ 60 60 60; +% 050 136 189; +% 244 109 67; +% 026 152 080; +% 240 130 40; +% 0 200 200; +% 230 220 50; +% 160 0 200; +% 160 230 50; +% 0 160 255; +% 240 0 130; +% 230 175 45; +% 0 210 140; +% 130 0 220]/255; + + +scrsz = get(0,'ScreenSize'); +f = figure('OuterPosition',[1 scrsz(4)/2 scrsz(3)/3 scrsz(4)/2]); + +if nrplots == 1 + h = subplot(1,1,1); + for i = 1:length(varargin) + + plot(varargin{i}(sind:end, tmeinfo), varargin{i}(sind:end, ccol(i)), 'Color', clr(i,:), 'Linewidth', 1.5); + hold on + end + datetick('x') + if tscale ~= [1 12] + set(gca, 'xlim', [datenum(tscale(1), 1, 15) datenum(tscale(2), 12, 15)]); + else + set(gca, 'xlim', [1 12]); + set(gca, 'xticklabel', mnths); + end + + + +else + + for i = 1:nrplots(1)*nrplots(2) + h(i) = subplot(nrplots(1), nrplots(2), i); + for j = 1:length(varargin) + plot(varargin{j}(sind:end, tmeinfo), varargin{j}(sind:end, ccol(j, i)), 'Color', clr(j,:), 'Linewidth', 1); + hold on + end + datetick('x'); + if tscale ~= [1 12] + set(gca, 'xlim', [datenum(tscale(1), 1, 15) datenum(tscale(2), 12, 15)]); + pbaspect([2 1 1]) + else + set(gca, 'xlim', [1 12]); + set(gca, 'xticklabel', mnths); + end + end + +end + +varargout{1} = h; diff --git a/varimax.m b/varimax.m new file mode 100644 index 0000000..d27bbca --- /dev/null +++ b/varimax.m @@ -0,0 +1,92 @@ +function [x, r] = varimax( x, normalize, tol, it_max ) +% VARIMAX - Rotate EOF's according to varimax algorithm +% +% This is actually a generic varimax routine and knows nothing special about +% EOFs. It expects a matrix of "loadings". Typically (in state space +% rotation), these loadings are the expansion coefficients (aka Principal +% Component Time Series) for the truncated basis of eigenvectors (EOFs), but +% they could also be the EOFs*diag(L)^(1/2) (in the case of rotation in +% sample space). +% +% Usage: [new_loads, rotmax] = varimax( loadings, normalize, tolerance, it_max ) +% +% where all but the loadings are optional. rotmax is the rotation matrix used. +% +% normalize determines whether or not to normalize the rows or columns of +% the loadings before performing the rotation. If normalize is true, then +% the rows are normalized by there individual lengths. Otherwise, no +% normalization is performed (default). After rotation, the matrix is +% renormalized. Normalizing over the rows corresponds to the Kaiser +% normalization often used in factor analysis. +% +% tolerance defaults to 1e-10 if not given. it_max specifies the maximum +% number of iterations to do - defaults to 1000. +% +% After the varimax rotation is performed, the new EOFs (in the case that +% the EC's were rotated - state space) can be found by new_eofs = +% eofs*rotmax. +% +% This function is derived from the R function varimax in the mva +% library. 
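+%
+% Usage sketch (hypothetical variable names; state-space rotation of the three
+% leading modes of an EOF analysis, cf. the note on new_eofs above):
+%   [pcs_rot, rotmax] = varimax(pcs(:,1:3));
+%   eofs_rot = eofs(:,1:3)*rotmax;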
+% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +% $Id: varimax.m,v 1.4 2002/10/10 00:28:30 dmk Exp $ +% +% Copyright (C) 2002 David M. Kaplan +% Licence: GPL +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +if nargin < 2 + normalize = 0; +end + +if nargin < 3 + tol = 1e-10; +end + +if nargin < 4 + it_max = 1000; +end + +[p, nc] = size(x); + +if nc < 2, return; end + +if normalize + rl = repmat( sqrt(diag( x*x' )), [1,nc] ); % By rows. + % rl = repmat( sqrt(diag( x'*x ))', [p,1] ); % By columns. + x = x ./ rl; +end + +TT = eye( nc ); +d = 0; + +for i = 1 : it_max + z = x * TT; + B = x' * ( z.^3 - z * diag(squeeze( ones(1,p) * (z.^2) )) / p ); + + [U,S,V] = svd(B); + + TT = U * V'; + + d2 = d; + d = sum(diag(S)); + + % End if exceeded tolerance. + if d < d2 * (1 + tol), break; end + +end + +% Final matrix. +x = x * TT; + +% Renormalize. +if normalize + x = x .* rl; +end + +if nargout > 1 + r = TT; +end diff --git a/varimax2.m b/varimax2.m new file mode 100644 index 0000000..5681227 --- /dev/null +++ b/varimax2.m @@ -0,0 +1,76 @@ +% [lambda,V] = varimax(lambda[,sign,tol]) Varimax rotation +% +% Performs varimax rotation (Kaiser 1958) on the column vectors contained +% in lambda. We follow Kaiser's notation. +% +% In: +% lambda: DxL matrix (L tol*V(length(V)) + + V_old = V(length(V)); + + for i=1:L-1 + for j=i+1:L + + % Optimal angle to rotate columns i, j + x = lambda(:,i)./h; + y = lambda(:,j)./h; + u = x.*x - y.*y; + v = 2*x.*y; + t = atan2( 2*(D*u'*v-sum(u)*sum(v)), D*(u'*u-v'*v)-sum(u)^2+sum(v)^2 )/4; + + % Anticlockwise rotation of angle t (t+pi is valid, too) + temp = [lambda(:,i) lambda(:,j)]*[cos(t) -sin(t); sin(t) cos(t)]; + lambda(:,i) = temp(:,1); + lambda(:,j) = temp(:,2); + + end + end + + % New value of the objective function + h = sqrt(sum(lambda'.^2))'+exp(-700); % Communalities + temp=lambda./(h*ones(1,L)); + V = [V sum(sum(temp.^4))-sum(sum(temp.^2).^2)/D]; % Objective function + +end + +% Sign inversion so that each column vector of lambda has mainly +% components of the same sign +if sign>0 + for i=1:L + if sum(lambda(:,i)) < 0 + lambda(:,i) = -lambda(:,i); + end + end +elseif sign<0 + for i=1:L + if sum(lambda(:,i)) > 0 + lambda(:,i) = -lambda(:,i); + end + end +end diff --git a/vec_curtosis.m b/vec_curtosis.m new file mode 100644 index 0000000..c5e8098 --- /dev/null +++ b/vec_curtosis.m @@ -0,0 +1,23 @@ +function [mu, sig, g1, g2] = vec_moments(ts); + +% Computes the curtosis of a matrix containting columns of time-series, +% i.e. each row might be a map and each column a time-series through a +% pixel + +nts = size(ts, 1); + + +% Compute the mean (first moment) +mu = mean(ts, 1); + +% Centralize the dataset +ts_c = ts - ones(nts, 1)*mu; + +% Compute the population variance (second moment) +sig = 1/nts*sum(ts_c.^2); + +% Compute the skewness (third moment) +g1 = (1/nts*sum(ts_c.^3))./((1/nts*sum(ts_c.^2)).^(3/2)); + +% Compute the curtosis (fourth moment) +g2 = (1/nts*sum(ts_c.^4))./((1/nts*sum(ts_c.^2)).^2) - 3; \ No newline at end of file diff --git a/vec_moments.m b/vec_moments.m new file mode 100644 index 0000000..e2beed8 --- /dev/null +++ b/vec_moments.m @@ -0,0 +1,34 @@ +function [mu, sig, g1, g2] = vec_moments(ts, bswitch); + +if nargin < 2, bswitch = 0; end + +% Computes the curtosis and skewness of a matrix containting columns of +% time-series, i.e. 
each row might be a map and each column a time-series +% through a pixel + +nts = size(ts, 1); + + +% Compute the mean (first moment) +mu = mean(ts, 1); + +% Centralize the dataset +ts_c = ts - ones(nts, 1)*mu; + +% Compute the population variance (second moment) +sig = 1/nts*sum(ts_c.^2); + +% Compute the skewness (third moment) +g1 = (1/nts*sum(ts_c.^3))./((1/nts*sum(ts_c.^2)).^(3/2)); + +% Compute the curtosis (fourth moment) +g2 = (1/nts*sum(ts_c.^4))./((1/nts*sum(ts_c.^2)).^2) - 3; + + +if bswitch + sig = (nts - 1)/nts * sig; + g1 = sqrt(nts*(nts - 1))/(nts - 2)*g1; + g2 = (nts - 1)/((nts - 2)*(nts - 3)) * ((nts + 1) * g2 + 6); +end + + \ No newline at end of file diff --git a/veccorr.m b/veccorr.m new file mode 120000 index 0000000..737b9e1 --- /dev/null +++ b/veccorr.m @@ -0,0 +1 @@ +/home/lorenz-c/Dokumente/GRACE/MATLAB functions/Validation functions/veccorr.m \ No newline at end of file diff --git a/vimfd_r.m b/vimfd_r.m new file mode 100644 index 0000000..7f59252 --- /dev/null +++ b/vimfd_r.m @@ -0,0 +1,47 @@ +function [dsdt, nanflag] = vimfd_r(vimfd, runoff); + +% ------------------------------------------------------------------------- +% The function computes the hydrometeorological storage change (i.e. +% vimfd - R of a set of catchments. +% It is assumed that the first two columns of vimfd and runoff contain +% months and years and the first row contains the catchment ids, according +% to which the output dsdt is created +% ------------------------------------------------------------------------- +% Input: vimfd [i x j] cell-array which contains month, year, +% eastward and northward component of specific +% humidity +% runoff [i x k] column in which the fields of the eastward +% component of specific humidity is stored +% +% Output: dsdt [i x j] +% +% ------------------------------------------------------------------------- +% Christof Lorenz, IMK-IFU Garmisch-Partenkirchen +% September 2010 +% ------------------------------------------------------------------------- +% Uses: +% ------------------------------------------------------------------------- + + +dsdt(:,1:2) = vimfd(:,1:2); + +% nan_msk = zeros(size(vimfd)); +% runoff_corr = runoff; + +% runoff_corr(isnan(runoff)) = 0; + +% dsdt(:,1:2) = vimfd(:,1:2); +k = 1; +for i = 3:size(vimfd,2) + + tmp = find(runoff(1,3:end) == vimfd(1,i)); +% keyboard + if tmp + dsdt(1,k+2) = vimfd(1,i); + dsdt(2:end,k+2) = vimfd(2:end,i) - runoff(2:end,tmp+2); + k = k + 1; + + end + clear tmp +end + \ No newline at end of file diff --git a/vincenty.m b/vincenty.m new file mode 100644 index 0000000..12d802c --- /dev/null +++ b/vincenty.m @@ -0,0 +1,93 @@ +function s = vincenty(theta1, theta2, dlambda, ellips); + +% The function computes the distance between two points on an ellipsoid +% given by their coordinates + + + +if nargin < 4 + ellips = 'WGS84'; +end + + + +if strcmp(ellips, 'WGS84') + a = 6378137; + b = 6356752.3142; + f = 1/298.257223563; +elseif strcmp(ellips, 'GRS80') + a = 6378137; + b = 6356752.3141; + f = 1/298.257222101; +elseif strcmp(ellips, 'Airy') + a = 6377563.396; + b = 6356256.909; + f = 1/299.3249646; +elseif strcmp(ellips, 'Intl') + a = 6378388; + b = 6356911.946; + f = 1/297; +elseif strcmp(ellips, 'Clarke') + a = 6378249.145; + b = 6356514.86955; + f = 1/293.465; +elseif strcmp(ellips, 'GRS67') + a = 6378160; + b = 6356774.719; + f = 1/298.25; +elseif strcmp(ellips, 'Bessel') + a = 6377397.155; + b = 6356078.963; + f = 1/299.1528153513233; +end + + + +U1 = atan((1-f)*tan(theta1)); +U2 = atan((1-f)*tan(theta2)); + 
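+% U1 and U2 are the reduced latitudes; the loop below iterates Vincenty's
+% inverse formula for the longitude difference on the auxiliary sphere until
+% it converges (tolerance 1e-12). All angles are expected in radians.
+%
+% Usage sketch (hypothetical coordinates; WGS84 is the default ellipsoid):
+%   s = vincenty(47.5*pi/180, 48.5*pi/180, 1.0*pi/180);   % distance in m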
+ +lam = dlambda; +lams = 2*pi; + +it = 1; + +while abs(lam - lams) > 1e-12 + + sinsig = sqrt((cos(U1)*sin(lam))^2 + (cos(U1)*sin(U2) - sin(U1)*cos(U2)*cos(lam))^2); + cossig = sin(U1)*sin(U2) + cos(U1)*cos(U2)*cos(lam); + + sig = atan2(sinsig, cossig); + + sina = cos(U1)*cos(U2)*sin(lam)/sinsig; + + cos2a = 1 - sina^2; + + cos2sm = cos(sig) - 2*sin(U1)*sin(U2)/cos2a; + C = f/16 * cos2a * (4+f*(4-3*cos2a)); + lams = lam; + lam = dlambda + (1-C)*f*sina*(sig+C*sinsig*(cos2sm + C*cossig*(-1+2*cos2sm^2))); + it = it + 1; + + if it > 1000 + error('Solution does not converge...!') + break + end + +end + +u2 = cos2a*(a^2 - b^2)/b^2; +k1 = (sqrt(1+u2) - 1)/(sqrt(1+u2) + 1); + +A = (1 + 1/4*k1^2)/(1 - k1); +B = k1*(1 - 3/8*k1^2); + +dsig = B*sinsig*(cos2sm + 1/4*B*(cossig*(-1+2*cos2sm^2) - 1/6*B*cos2sm*(-3+4*sinsig^2)*(-3+4*cos2sm^2))); +s = b*A*(sig - dsig); + + + + + + + diff --git a/water_budget.m b/water_budget.m new file mode 100644 index 0000000..b02a867 --- /dev/null +++ b/water_budget.m @@ -0,0 +1,26 @@ +function [conv_land, conv_ocean] = water_budget(inpt, clms, period); + +% The function computes the global water budget of a given input dataset +% over one year. As a rule of thumb, this budget should be closed over a +% long term average, i.e. the convergence of moisture over land (+ P - E) +% should equal the divergence of moisture over the oceans ( - (P - E)) +% (Hagemann et. al., 2005) + + +sind = find(cell2mat(inpt(:,clms(1))) == ... + 1 & cell2mat(inpt(:,clms(2))) == period(1)); + +eind = find(cell2mat(inpt(:,clms(1))) == ... + 12 & cell2mat(inpt(:,clms(2))) == period(2)); + + +load continents.asc + + +mask_land = zeros(360, 720); +mask_land(continents > 0) = 1; + +mask_ocean = zeros(360, 720); +mask_ocean(continents == -9999) = 1; + + diff --git a/wcdiff.m b/wcdiff.m new file mode 100644 index 0000000..00824ad --- /dev/null +++ b/wcdiff.m @@ -0,0 +1,50 @@ +function ddt = wcdiff(inpt, clms, mval, wghts) + +if nargin < 3 + mval = -9999; +end + +if nargin < 2 + clms = [3 4 8]; +end + +mnths = cell2mat(inpt(:, clms(:,1))); +yrs = cell2mat(inpt(:, clms(:,2))); +inpt = inpt(:, clms(3)); + +if wghts == 1 + for i = 1:length(inpt) + if i == 1 + dom_fw = eomday(yrs(i+1), mnths(i+1)); + dom_bw = eomday(yrs(i), mnths(i)); + ddt{i,1} = (inpt{i+1}/dom_fw - inpt{i}/dom_bw)*dom_bw; + elseif i == length(inpt) + dom_fw = eomday(yrs(end), mnths(end)); + dom_bw = eomday(yrs(end-1), mnths(end-1)); + ddt{i,1} = (inpt{end}/dom_fw - inpt{end-1}/dom_bw)*dom_fw; + else + dom_fw = eomday(yrs(i+1), mnths(i+1)); + dom_bw = eomday(yrs(i-1), mnths(i-1)); + dom_c = eomday(yrs(i), mnths(i)); + ddt{i,1} = (inpt{i+1}/dom_fw - inpt{i-1}/dom_bw)/2*dom_c; + end + ddt{i,1}(inpt{i} == mval) = mval; + end + +else + + for i = 1:length(inpt) + if i == 1 + ddt{i,1} = inpt{i+1} - inpt{i}; + elseif i == length(inpt) + ddt{i,1} = inpt{end} - inpt{end-1}; + else + ddt{i,1} = (inpt{i+1} - inpt{i-1})/2; + end + ddt{i,1}(inpt{i} == mval) = mval; + end +end + + + + \ No newline at end of file diff --git a/wcdiffs_cell.m b/wcdiffs_cell.m new file mode 100644 index 0000000..ee740b5 --- /dev/null +++ b/wcdiffs_cell.m @@ -0,0 +1,37 @@ +function ddt = wcdiffs_cell(inpt, clms, wghts, mval) + +if nargin < 4, mval = -9999; end +if nargin < 3, wghts = 1; end +if nargin < 2, clms = [3 4 8]; end + +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); + +if wghts == 1 + dom = eomday(yrs, mnths); +else + dom = ones(length(mnths), 1); +end + + +fields = inpt(:, clms(3)); + +ddt(:, 1) = inpt(:, clms(1)); +ddt(:, 2) = inpt(:, 
clms(2)); + +% Day-weighted central differences over the monthly fields; one-sided +% differences are used at the ends of the time series +for i = 1:length(dom) + if i == 1 + ddt{1,3} = (dom(2)*fields{2} - dom(1)*fields{1})/((dom(2)/2 + dom(1)/2)); + elseif i == length(dom) + ddt{i,3} = (dom(i)*fields{i} - dom(i-1)*fields{i-1})/((dom(i)/2 + dom(i-1)/2)); + else + ddt{i,3} = (dom(i+1)*fields{i+1} - dom(i-1)*fields{i-1})/((dom(i+1)/2+dom(i)+dom(i-1)/2)); + end + ddt{i,3}(fields{i} == mval) = mval; +end + + + + + + diff --git a/wdiffs_cell.m b/wdiffs_cell.m new file mode 100644 index 0000000..012492d --- /dev/null +++ b/wdiffs_cell.m @@ -0,0 +1,32 @@ +function ddt = wdiffs_cell(inpt, clms, wghts, mval) + +if nargin < 4, mval = -9999; end +if nargin < 3, wghts = 1; end +if nargin < 2, clms = [3 4 8]; end + +mnths = cell2mat(inpt(:, clms(1))); +yrs = cell2mat(inpt(:, clms(2))); + +% Weight the monthly fields by the number of days in each month +if wghts == 1 + dom = eomday(yrs, mnths); +else + dom = ones(length(mnths), 1); +end + +fields = inpt(:, clms(3)); + +ddt(:, 1) = inpt(:, clms(1)); +ddt(:, 2) = inpt(:, clms(2)); + +for i = 1:length(dom) + if i == 1 + ddt{1,3} = (dom(2)*fields{2} - dom(1)*fields{1})/((dom(2) + dom(1))*(dom(2)/2 + dom(1)/2)); + elseif i == length(dom) + ddt{i,3} = (dom(i)*fields{i} - dom(i-1)*fields{i-1})/((dom(i) + dom(i-1))*(dom(i)/2 + dom(i-1)/2)); + else + ddt{i,3} = (dom(i+1)*fields{i+1} - dom(i-1)*fields{i-1})/((dom(i+1)+dom(i-1))*(dom(i+1)/2+dom(i)+dom(i-1)/2)); + end + + ddt{i,3}(fields{i} == mval) = mval; +end + + + + + + diff --git a/whitening.m b/whitening.m new file mode 100644 index 0000000..c0bfbc0 --- /dev/null +++ b/whitening.m @@ -0,0 +1,8 @@ +function [Fw, B] = whitening(F, eofs, lams) + +% Whitens the data matrix F: the EOFs (eofs) are scaled by the inverse +% square roots of the corresponding eigenvalues (lams) and the resulting +% whitening matrix B is applied to F. + +scales = sqrt(lams); +B = diag(1./scales)*eofs; + +Fw = B*F; + diff --git a/wmnpxl.m b/wmnpxl.m new file mode 100644 index 0000000..248a85b --- /dev/null +++ b/wmnpxl.m @@ -0,0 +1,72 @@ +function mn = wmnpxl(fld, lat_ref, lon_ref, lat_pxl, lon_pxl, clms) + +% The function extracts, for each requested coordinate pair (lat_pxl, +% lon_pxl), the time series of the nearest pixel from the gridded fields +% in the cell array fld. + +if nargin < 6, clms = [3 4 8]; end +if length(clms) == 4 + dset = 'daily'; +elseif length(clms) == 3 + dset = 'monthly'; +elseif length(clms) == 2 + dset = 'annual'; +end + +% Find the row and column of the pixel closest to each requested point +for i = 1:length(lat_pxl) + + D_lat = lat_ref - lat_pxl(i); + D_lon = lon_ref - lon_pxl(i); + + r(i) = find(abs(D_lat) == min(abs(D_lat)), 1); + c(i) = find(abs(D_lon) == min(abs(D_lon)), 1); + + fld_lat = lat_ref(r(i)); + fld_lon = lon_ref(c(i)); + fprintf('Nearest pixel: lambda = %5f, theta = %5f \n', fld_lon, fld_lat) +end + +if size(lat_ref, 2) == 1 + if strcmp(dset, 'daily') + for i = 1:length(fld) + mn(i,1) = fld{i, clms(1)}; + mn(i,2) = fld{i, clms(2)}; + mn(i,3) = fld{i, clms(3)}; + mn(i,4) = datenum(fld{i, clms(3)}, fld{i, clms(2)}, fld{i, clms(1)}); + + for j = 1:length(lat_pxl) + mn(i,j+4) = fld{i, clms(4)}(r(j), c(j)); + end + end + mn = [0 0 0 0 1:length(lat_pxl); mn]; + elseif strcmp(dset, 'monthly') + for i = 1:length(fld) + mn(i,1) = fld{i, clms(1)}; + mn(i,2) = fld{i, clms(2)}; + mn(i,3) = datenum(fld{i, clms(2)}, fld{i, clms(1)}, 15); + + for j = 1:length(lat_pxl) + mn(i,j+3) = fld{i, clms(3)}(r(j), c(j)); + end + end + mn = [0 0 0 1:length(lat_pxl); mn]; + end + +else + +% 2-d coordinate matrices: use the combined absolute distance + K = abs(D_lat) + abs(D_lon); + + [r, c] = find(K == min(min(K))); + + fld_lat = lat_ref(r, c); + fld_lon = lon_ref(r, c); + +end \ No newline at end of file diff --git a/wrf2glbl.m b/wrf2glbl.m new file mode 100644 index 0000000..843b3fb --- /dev/null +++ b/wrf2glbl.m @@ -0,0 +1,65 @@ +function [new_fld, n_lon, n_lat, glbl_fld] = wrf2glbl(old_fld, o_lon, o_lat,
glbl_fld) + +% The function interpolates a regional (e.g. WRF) field old_fld, given on +% the coordinate matrices o_lon/o_lat, onto a regular 0.5 degree grid and +% embeds the result into the global 0.5 degree field glbl_fld. + +% Snap the longitude and latitude limits of the input grid to the nearest +% half-degree boundaries +if abs(o_lon(1,1)-fix(o_lon(1,1))) > 0.5 + start_lon = round(o_lon(1,1))-sign(o_lon(1,1))*0.5; +elseif abs(o_lon(1,1)-fix(o_lon(1,1))) < 0.5 + start_lon = fix(o_lon(1,1)); +elseif abs(o_lon(1,1)-fix(o_lon(1,1))) == 0.5 + start_lon = o_lon(1,1); +end + +if abs(o_lon(1,end)-fix(o_lon(1,end))) > 0.5 + end_lon = round(o_lon(1,end))-sign(o_lon(1,end))*0.5; +elseif abs(o_lon(1,end)-fix(o_lon(1,end))) < 0.5 + end_lon = fix(o_lon(1,end)); +elseif abs(o_lon(1,end)-fix(o_lon(1,end))) == 0.5 + end_lon = o_lon(1,end); +end + +if abs(o_lat(1,1)-fix(o_lat(1,1))) > 0.5 + end_lat = round(o_lat(1,1))-sign(o_lat(1,1))*0.5; +elseif abs(o_lat(1,1)-fix(o_lat(1,1))) < 0.5 + end_lat = fix(o_lat(1,1)); +elseif abs(o_lat(1,1)-fix(o_lat(1,1))) == 0.5 + end_lat = o_lat(1,1); +end + +if abs(o_lat(end,1)-fix(o_lat(end,1))) > 0.5 + start_lat = round(o_lat(end,1))-sign(o_lat(end,1))*0.5; +elseif abs(o_lat(end,1)-fix(o_lat(end,1))) < 0.5 + start_lat = fix(o_lat(end,1)); +elseif abs(o_lat(end,1)-fix(o_lat(end,1))) == 0.5 + start_lat = o_lat(end,1); +end + +% Target 0.5 degree grid (pixel centres) covering the regional domain +lambda = start_lon-0.25*sign(start_lon):0.5:end_lon-0.25*sign(end_lon); +theta = start_lat-0.25*sign(start_lat):0.5:end_lat-0.25*sign(end_lat); + +[n_lon, n_lat] = meshgrid(lambda,flipud(theta)); + +n_lat = flipud(n_lat); + +new_fld = interp2(o_lon, o_lat, old_fld, n_lon, n_lat, 'bicubic'); + +% Indices of the regional grid within the global 0.5 degree grid +lambda_glbl = -179.75:0.5:179.75; +theta_glbl = 89.75:-0.5:-89.75; + +u_th = find(theta_glbl == n_lat(1,1)); +l_th = find(theta_glbl == n_lat(end,1)); + +u_la = find(lambda_glbl == n_lon(1,1)); +l_la = find(lambda_glbl == n_lon(1,end)); + +glbl_fld(u_th:l_th, u_la:l_la) = new_fld; + + + + + + + + diff --git a/xy2gmt.m b/xy2gmt.m new file mode 100644 index 0000000..fcbd4db --- /dev/null +++ b/xy2gmt.m @@ -0,0 +1,12 @@ +function otpt = xy2gmt(y, x, scle) + +if nargin < 3 + scle = 1; +end + +if nargin < 2 + x = (1:length(y))'; +end + +if size(y,2) > 1 + y = y'; +end + +otpt = [x y ones(length(x),1)*scle]; + diff --git a/yrlplt.m b/yrlplt.m new file mode 100644 index 0000000..96aafb0 --- /dev/null +++ b/yrlplt.m @@ -0,0 +1,11 @@ +function yrlplt(inpt, ts_clm, mnth_clm, yr_clm, strt_rw, nme) + +mnth = inpt(strt_rw:end,mnth_clm); +yr = inpt(strt_rw:end,yr_clm); + +% Collect the requested time series columns +for i = 1:length(ts_clm) + ts(:,i) = inpt(strt_rw:end,ts_clm(i)); +end + + + diff --git a/zblank.m b/zblank.m new file mode 100644 index 0000000..3206f9b --- /dev/null +++ b/zblank.m @@ -0,0 +1,40 @@ +function z = zblank(x,y,xp,yp) + +% Interface to the mex file polymx.c +% This is a primitive version, with no argument checking. +% Its main purpose is to be sure polymx gets monotonically +% increasing x and y, as required. It also converts the +% zeros and ones to NaNs and zeros, respectively. This +% functionality could, of course, be added to polymx.c; +% but it is fast enough this way. + +if (isempty(xp)) + z = zeros(length(x), length(y)); + return +end + +rev_x = 0; +if diff(x(1:2)) < 0, + rev_x = 1; + x = x(end:-1:1); +end +rev_y = 0; +if diff(y(1:2)) < 0, + rev_y = 1; + y = y(end:-1:1); +end + +z = polymx(x,y,xp,yp); + + imask = (z == 0); + z(find(imask)) = NaN; + z(find(~imask)) = 0; + + +if rev_x, + z = z(end:-1:1,:); +end + +if rev_y, + z = z(:,end:-1:1); +end \ No newline at end of file diff --git a/zgrid.m b/zgrid.m new file mode 100644 index 0000000..f9a1ce2 --- /dev/null +++ b/zgrid.m @@ -0,0 +1,175 @@ +function [Zg, Xo, Yo, Zb] = zgrid(xi, yi, zi, xo, yo, varargin) +% function [Zg, Xo, Yo, Zb] = zgrid(xi, yi, zi, xo, yo, varargin) +% +% Inputs: xi, yi are vectors or matrices with (x,y) coordinates.
+% zi is "elevation" at (x,y). +% xi, yi, zi can all have the same dimensions, +% or if zi is a matrix, xi can be the x values of +% the row dimension, yi the y values of the column +% dimension. (i.e., x corresponds to the first +% index of zi, y to the second.) +% xo, yo are (1) vectors defining UNIFORM new grids in the +% x and y directions. Uniformity is not checked, +% but the results will not be correct without it. +% (2) Either may instead be scalar, in +% which case it will be the target number of +% segments into which the original grid, xi +% or yi, will be divided by the "stretch.m" +% routine. xi or yi must in that case be a +% vector with the row or column dimension of +% zi. Additional parameters for stretch +% may then be given as follows. +% varargin represents any or all of several optional +% argument pairs: +% 1) stretch arguments: +% 'nsegmin', n min number of segments; default +% 2, min is 1 +% 'nsegmax', n max number of segments in initial +% segmentation; actual max may exceed this +% locally; default is the same as +% the target. +% 'dyfrac', d maximum relative change in grid +% spacing. Number of segments is increased +% locally until the relative change is below +% this. Default 0.5. +% 2) zgridmx arguments: +% 'cay', c c = 0 for Laplacian, c >> 1 for pure spline +% 'del', d d << 1 to interpolate mainly in x +% 'nrng', n interpolate/extrapolate no more than +% nrng grid points away from a data point +% bpx, bpy vectors with x, y coordinates of vertices +% of a CLOSED blanking polygon; grid +% points OUTSIDE the polygon will be NaN +% on output +% Outputs: Zg is the matrix of zi gridded on xo and yo. +% It is NaN beyond "nrng" or the blanking polygon. +% Xo, Yo are a matrices with the same dimensions as Zg, +% giving the x, y coordinates of the grid. +% Zb is an array of the same dimensions with 0 in the +% unblanked region, NaN where the grid is blanked. +% + +% +% (Matlab 5 only) +% This is a shell for the zgridmx.c mex file, which in turn is a +% C translation of Roger Lukas's Fortran zgrid routine as taken +% from the UH Fortran contour program. +% +% Eric Firing, 97/04/26 +% + +% defaults: +cay = 1; +del = 1; +nrng = 3; +bpx = []; +bpy = []; +nsegmin = 1; +dyfrac = 0.5; +nsegmax = 0; % dummy default; actual is set based on input + +% use while loop; it is more flexible in case we don't have +% arguments always in pairs. 
+i = 1; +while i < length(varargin) % less than, because all args come in pairs + arg = varargin{i}; + arg1 = varargin{i+1}; + if isstr(arg) + if strcmp(arg, 'cay') + cay = arg1; + elseif strcmp(arg, 'del') + del = arg1; + elseif strcmp(arg, 'nrng') + nrng = arg1; + elseif strcmp(arg, 'nsegmin') + nsegmin = arg1; + elseif strcmp(arg, 'nsegmax') + nsegmax = arg1; + elseif strcmp(arg, 'dyfrac') + dyfrac = arg1; + else + error(['Unrecognized string input argument: ' arg]); + end + else + bpx = arg; + bpy = arg1; + end + i = i + 2; +end +if i ~= length(varargin)+1 + error('There is a leftover argument or a missing argument'); +end + + +[mxi, nxi] = size(xi); +[myi, nyi] = size(yi); +[mzi, nzi] = size(zi); + +lxi = mxi*nxi; +lyi = myi*nyi; +lzi = mzi*nzi; + +if (lxi ~= lzi | lyi ~= lzi) + if (mzi ~= nzi & lyi == mzi & lxi == nzi) + disp('Warning: swapping x and y to fit dimensions of z') + xsave = xi; + xi = yi; + yi = xsave; + lyi = length(yi); + lxi = length(xi); + clear xsave; + end + xi = xi(:); + xi = xi(:,ones(nzi,1)); + yi = yi(:).'; + yi = yi(ones(mzi,1),:); +end +if (~(length(xi(:)) == lzi & length(yi(:)) == lzi)) + error('dimension mismatch among xi, yi, zi') +end + + +xo = xo(:); % column +nxo = length(xo); +if nxo == 1, + if (size(xi,1) ~= mzi) + error('length of xi must match row dimension of zi'); + end + nsegmed = xo; + nsegmax = max([nsegmax nsegmed]); + [xo, ixo] = stretch(xi(:,1), nsegmed, nsegmin, nsegmax, dyfrac); + ixo = ixo(:); + xi = ixo(:,ones(nzi,1)); + dx = 1; + xo1 = 1; + nxo = length(xo); +else + dx = diff(xo([1 2])); + xo1 = xo(1); +end + +yo = yo(:).'; % row +nyo = length(yo); +if nyo == 1, + if (size(yi,2) ~= nzi) + error('length of yi must match column dimension of zi'); + end + nsegmed = yo; + nsegmax = max([nsegmax nsegmed]); + [yo, iyo] = stretch(yi(1,:), nsegmed, nsegmin, nsegmax, dyfrac); + yi = iyo(ones(mzi,1),:); + dy = 1; + yo1 = 1; + nyo = length(yo); +else + dy = diff(yo([1 2])); + yo1 = yo(1); +end + +Xo = xo(:,ones(1,nyo)); +Yo = yo(ones(1,nxo),:); + +Zb = zblank(xo, yo, bpx, bpy); + +ii = find(~isnan(zi)); +Zg = zgridmx(Zb, xi(ii), yi(ii), zi(ii), xo1, yo1, dx, dy, del, cay, nrng, 1); \ No newline at end of file diff --git a/zonal_contour_plot.m b/zonal_contour_plot.m new file mode 100644 index 0000000..f6dba9e --- /dev/null +++ b/zonal_contour_plot.m @@ -0,0 +1,45 @@ +function [] = zonal_contour_plot(inpt, range) + +lats = 1:360; +mnth = 1:12; + +x_tck{1} = 'J'; +x_tck{2} = 'F'; +x_tck{3} = 'M'; +x_tck{4} = 'A'; +x_tck{5} = 'M'; +x_tck{6} = 'J'; +x_tck{7} = 'J'; +x_tck{8} = 'A'; +x_tck{9} = 'S'; +x_tck{10} = 'O'; +x_tck{11} = 'N'; +x_tck{12} = 'D'; + +y_tck{1} = '90S'; +y_tck{2} = '60S'; +y_tck{3} = '30S'; +y_tck{4} = '0'; +y_tck{5} = '30N'; +y_tck{6} = '60N'; +y_tck{7} = '90N'; + + + +load /home/lorenz-c/Data/colormaps/precip_zonal.mat +figure + +contourf(flipud(inpt), range, 'linestyle', 'none') + +axis([1.5 13.5 61 330]); +% axis([1.5 13.5 1 360]); +set(gca, 'xtick', 2:1:13); +set(gca, 'xticklabel', x_tck, 'fontsize', 14); + +set(gca, 'ytick', 1:60:360); +set(gca, 'yticklabel', y_tck, 'fontsize', 14); + +colormap(precip_zonal) +grid on +pbaspect([12 6 1]) +caxis([0 9]) \ No newline at end of file diff --git a/ztransform.m b/ztransform.m new file mode 100644 index 0000000..77eef61 --- /dev/null +++ b/ztransform.m @@ -0,0 +1,23 @@ +function Z = ztransform(inpt, frmt) + + +if strcmp(frmt, 'full') + fld = inpt(2:end, 4:end); +else + fld = inpt; +end + +sdev = nanstd(fld); +mn = nanmean(fld); +nts = length(fld(:,1)); + +Z = (fld - ones(nts, 
1)*mn)./(ones(nts,1)*sdev); + + +if strcmp(frmt, 'full') + Z = [inpt(1, :); inpt(2:end, 1:3) Z]; +end + + + + \ No newline at end of file
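

A minimal usage sketch for vincenty.m, assuming the latitudes and the longitude difference are passed in radians; the two coordinate pairs below are purely illustrative:

% Two illustrative points (latitude/longitude in degrees)
lat1 = 47.5;  lon1 = 11.1;
lat2 = 49.0;  lon2 =  8.4;

% vincenty.m expects radians; only the longitude difference enters
theta1  = lat1*pi/180;
theta2  = lat2*pi/180;
dlambda = (lon2 - lon1)*pi/180;

% Geodesic distance in metres on the (default) WGS84 ellipsoid
s = vincenty(theta1, theta2, dlambda, 'WGS84');
fprintf('Distance: %.1f km\n', s/1000);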
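A minimal sketch of how the monthly difference routines (e.g. wcdiffs_cell.m) are called, assuming the cell-array layout implied by the default clms = [3 4 8], i.e. month in column 3, year in column 4 and a gridded field in column 8; the dummy fields below are random numbers used only for illustration:

% Build a small dummy database: 24 months of 360 x 720 fields
nm  = 24;
dat = cell(nm, 8);
for i = 1:nm
    dat{i,3} = mod(i-1, 12) + 1;        % month
    dat{i,4} = 2003 + floor((i-1)/12);  % year
    dat{i,8} = rand(360, 720);          % dummy monthly field
end

% Day-weighted central differences (wghts = 1), missing value -9999
ddt = wcdiffs_cell(dat, [3 4 8], 1, -9999);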
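A short sketch of ztransform.m applied to a plain data matrix (any frmt other than 'full' uses the input as is); the data are again random numbers for illustration only:

% Z-transform the columns of a plain [n x m] data matrix
fld = randn(120, 5);            % 120 time steps, 5 series (dummy data)
Z   = ztransform(fld, 'none');  % columns are centred and scaled to unit std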