Thursday, April 23, 2015
Monday, April 20, 2015
Python - Converting multiple File geodatabase (.gdb) into multiple Personal geodatabase (.mdb)
Let's say that I have several file geodatabases in a folder, and each file geodatabase has one feature dataset named "URS" while the rest are feature classes. The "URS" feature dataset contains several feature classes as well. Individually, I can export a file geodatabase to an XML workspace and then import that into a personal geodatabase, and all domains transfer fine. But the task here is to convert multiple file geodatabases into multiple personal geodatabases without including the feature dataset. If a geodatabase doesn't have the "URS" feature dataset or any other feature classes, then the Python program will create an empty personal geodatabase.
This is how we need to accomplish converting multiple file geodatabase (.gdb) into multiple personal geodatabase.
This is how we need to accomplish converting multiple file geodatabase (.gdb) into multiple personal geodatabase.
Here is the Python code to accomplish this task.
If you don't want any feature class from the feature dataset, then this is how you need to reflect the changes in the above-mentioned code.
Thursday, April 16, 2015
Matlab - PERSIANN Climate Data Modeling
Let's say you have downloaded PERSIANN climate data files from the Center for Hydrometeorology and Remote Sensing.
Executing below mentioned codes on the downloaded .bin files will give you below mentioned output.
% this program is for training of CHRS graduate created by Prashant Kuranjekar
% last edit: 03/12/2007
% Usage: follow the hint in the screen and enjoy!
% Notice: You NEED yyyymmdd2yydoy.m to run me!
% Reads half-hourly PERSIANN rain-rate grids (3000 rows x 9000 columns, rows
% spanning the latitude band mapped over [-60, 60]) for a user-selected time
% window and geographic box, accumulates them, and plots the running mean and
% the latest field.
clear all;
%close all;
% read data
% Directory holding the gzipped half-hourly 'rgccs*.bin.gz' files.
NDIR = '/mnt/data01/goesdata/globccs/nesdis/';
% Interactive run parameters: start date/time, number of half-hourly records,
% and the geographic window of interest (note the sign conventions in the
% prompts: "-" means West for longitude and North for latitude).
yyyy = input('Please input the year (format: yyyy):');
mm = input('Please input the month (format: mm):');
ddsta = input('Please input the start day (format: dd):');
hrsta = input('Please input the start hour (format: hr):');
mnsta = input('Please input the start minute (format: mn):');
rdcnt = input('Please input the number of records (48 for a day):');
lgtlw = input('Please input the lower boundary of longitude degree ("-" for West):');
lgtup = input('Please input the upper boundary of longitude degree ("-" for West):');
lttlw = input('Please input the lower boundary of latitude degree ("-" for North):');
lttup = input('Please input the upper boundary of latitude degree ("-" for North):');
% mapping() converts degree boundaries into matrix row/column index ranges.
% (Helper function assumed to live next to this script -- not shown here.)
[col_l,col_u]=mapping(lgtlw,lgtup,-180,180,1,9000);
[row_l,row_u]=mapping(lttlw,lttup,-60,60,1,3000);
col=col_l:1:col_u;
row=row_l:1:row_u;
% Mask is NaN everywhere except the selected window, so mask.*data blanks
% out everything outside the area of interest.
mask=nan(3000,9000);
mask(row,col)=1;
% Build human-readable hemisphere labels for the selected boundary.
% Longitude: negative input means West ('W'), otherwise East ('E').
if lgtlw<0
    strlgtlw=sprintf('W%.3f',0-lgtlw);
else
    strlgtlw=sprintf('E%.3f',lgtlw);
end
if lgtup<0
    strlgtup=sprintf('W%.3f',0-lgtup);
else
    strlgtup=sprintf('E%.3f',lgtup);
end
% Latitude: per the input prompts, negative input means North ('N'),
% otherwise South ('S').
if lttlw<0
    strlttlw=sprintf('N%.3f',0-lttlw);
else
    % bug fix: the original assigned this to strlgtlw, leaving strlttlw
    % undefined (and clobbering the longitude label) for lttlw >= 0.
    strlttlw=sprintf('S%.3f',lttlw);
end
if lttup<0
    % bug fix: the original used 'W'/'E' here; the upper latitude bound must
    % use the same 'N'/'S' convention as the lower bound.
    strlttup=sprintf('N%.3f',0-lttup);
else
    strlttup=sprintf('S%.3f',lttup);
end
disp(['The selected boundary is ',strlgtlw,'~',strlgtup,', ',strlttlw,'~',strlttup]);
disp(['Relative zone in the matrix is row:',num2str(row_l),'~',num2str(row_u),', column:',num2str(col_l),'~',num2str(col_u)]);
%initialize
totp = zeros(3000,9000);  % accumulated precipitation (missing treated as 0)
cnt = 0;                  % per-pixel count of valid (non-missing) records
dd = ddsta;
hr = hrsta;
mn = mnsta;
scrsz = get(0,'ScreenSize');
figure1 = figure('Position',[1 scrsz(4)/2 (scrsz(3)-2)/2 scrsz(4)/2],'Name','[I] Changing Process of average data','NumberTitle','off');
figure2 = figure('Position',[scrsz(3)/2 scrsz(4)/2 (scrsz(3)-2)/2 scrsz(4)/2],'Name','[II] Changing Process of actural data','NumberTitle','off');
% bug fix: the original wrote zeros(3000:9000:rdcnt), which passes a colon
% range (usually empty, since rdcnt < 3000) instead of allocating the
% intended 3000 x 9000 x rdcnt storage for the per-record fields.
nperseries = zeros(3000,9000,rdcnt);
% data read and process
for rdnum = 1:1:rdcnt
% Build the file name for the current timestamp. yydoy = 2-digit year plus
% day-of-year, produced by the required helper yyyymmdd2yydoy.m.
yydoy = yyyymmdd2yydoy(yyyy,mm,dd);
shr = sprintf('%02d',hr);
smn = sprintf('%02d',mn);
gzfilename = [NDIR, 'rgccs', yydoy, shr, smn, '.bin.gz'];
filename = ['rgccs', yydoy, shr, smn, '.bin'];
disp(['Reading Data in ', filename,' ......']);
% Decompress into the working directory, read, then delete the temp copy.
unix(['gunzip -c ', gzfilename, ' > ', filename]);
fid = fopen(filename, 'r', 'b');
npers = fread(fid, [9000 3000], 'int16', 'ieee-le');
fclose(fid);
% Stored values are scaled by 100; transpose to 3000x9000 (rows = latitude).
npers = (npers/100)';
npers2 = npers;
npers3 = ones(3000,9000);
% Negative values flag missing data: NaN in npers for display, 0 in npers2
% for accumulation, and npers3 becomes a 0/1 validity mask so cnt tracks
% how many valid records each pixel has seen.
npers(find(npers<0)) = nan;
npers2(find(npers2<0)) = 0;
npers3(isnan(npers)) = 0;
nperseries(:,:,rdnum)=npers;
totp = totp+npers2;
cnt = cnt+npers3;
disp(['Read Data in ', filename, ' sucessfully!']);
unix(['rm ',filename]);
% Advance the clock by one half-hour record (files exist at :15 and :45).
% NOTE(review): the day rollover never advances mm, so runs crossing a month
% boundary rely on yyyymmdd2yydoy handling dd > days-in-month -- confirm.
if mn == 15
mn = 45;
else
mn = 15;
hr = hr+1;
if hr == 24
hr = 00;
dd = dd+1;
end
end
% mean
% Running mean over the records read so far, masked to the selected window.
avep=totp./cnt;
avep_selected=avep.*mask;
npers_selected=npers.*mask;
% plot daily data & mean
figure(figure1);
imagesc(avep_selected);
colorbar;
figure(figure2);
imagesc(npers_selected);
colorbar;
end
% Optional replay: cycle the stored half-hourly fields as an animation.
IsRepeat = input('Do you want to see the simulation of areal precipitaion changing process (Y/N):','s');
if IsRepeat == 'Y' || IsRepeat == 'y'
disp('The animation is shown in figure [II], press "ctrl+c" to exit!')
rptcnt = 0;
% Infinite loop by design; the user breaks out with ctrl+c.
while 1
% Wrap around the stored records so the animation repeats.
rdnum = rem(rptcnt,rdcnt)+1;
figure(figure2);
imagesc(nperseries(:,:,rdnum));
colorbar;
pause(0.5);
rptcnt = rptcnt+1;
end
else
close all;
disp('===================End of persiann simulation @Prashant Kuranjekar. 2007===================');
end
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
% Script 2: accumulate PERSIANN rain-rate records over a range of days and a
% user-selected lat/long box, writing hourly, daily and period ASCII grids.
clear all;
% Input directory of gzipped half-hourly 'rgccs*.bin.gz' files.
RDIR = ['/mnt/data01/goesdata/globccs/nesdis/'];
%RDIR = ['/mnt/data01/goesdata/globccs/rapids/'];
%RDIR = ['/mnt/dans/goesdata/globccs/merge/out_post/'];
RES = 0.04;
%nesdis
% Quality thresholds: a pixel needs at least hthr valid half-hour records in
% a day (else its daily value becomes NaN) and at least dthr valid days in
% the period (else its period total becomes NaN).
hthr=20;
%dthr=8;
dthr=7;
%Rapids
%hthr=40;
%dthr=10;
%Ask for the input
% Example values for the prompts below:
% file = Erin;
% file3 = Ptot_hrly;
% file1 = Pcnt_dly;
% file2 = Pcnt;
%
% year = 2007;
% dstart= 221;
% dend = 232;
% hstart= 0;
% hend = 23;
%________________________________________________________________________
% lower_left_long = -110;
% lower_right_long = -85;
% lower_left_lat = 25;
% upper_left_lat = 38;
%Ask for the input
% Output file-name prefixes and the time window (days are day-of-year).
file = input('Enter the file\n','s');
file3 = input('Enter the filename for Ptot_hrly\n','s');
file1 = input('Enter the filename for Pcnt_dly\n','s');
file2 = input('Enter the filename for Pcnt\n','s');
year = input('Enter the year\n');
dstart= input('Enter the start day\n');
dend = input('Enter the end day\n');
hstart= input('Enter the start hour\n');
hend = input('Enter the end hour\n');
% %________________________________________________________________________
% Bounding box of the area of interest, in degrees.
lower_left_long = input('Enter the lower left longitude\n');
lower_right_long = input('Enter the lower right longitude\n');
lower_left_lat = input('Enter the lower left latitude\n');
upper_left_lat = input('Enter the upper left latitude\n');
%________________________________________________________________________
% Convert the numeric year to the 2-digit string used in the file names.
% bug fix: the original used four independent "if" statements, where
% "if 2000<year<2010" evaluates as "(2000<year)<2010" (always true in
% MATLAB), and later conditions compared a string against a number after
% "year" had already been overwritten with a string. As a result years
% before 2000 produced garbage and years >= 2010 were mislabeled '0NN'.
% A proper if/elseif chain evaluates exactly one branch.
if year<2000
    year = num2str(year-1900);        % e.g. 1995 -> '95'
elseif year<2010
    year = strcat(['0',num2str(year-2000)]);  % e.g. 2007 -> '07'
else
    year = num2str(year-2000);        % e.g. 2015 -> '15'
end
yr=[year];
% Grid constants: 3000 x 9000 global PERSIANN grid at 0.04 degree cells.
cellsize = 0.04;
ROWS = 3000;
COLS = 9000;
nodata_value = -99;
% Reference cell (Xref, Yref) used to convert degrees to matrix indices.
Xref = 0.02;
Yref = 59.8;
X = 0;
Y = 0;
Xref_adj = round(((Xref-X)/cellsize)+0.5)+4500;
Yref_adj = 1500-round((Yref-Y)/cellsize);
%______________________________________________________________________
%PART 1
% Column indices of the requested longitude window. NOTE(review): if a bound
% falls outside [-180,180] the variable is simply never assigned and the
% script errors later at ncols -- there is no explicit error message.
if (lower_left_long>=-180 & lower_left_long<=180)
left_col= round(((lower_left_long-Xref)/cellsize)+0.5)+Xref_adj;
end
if (lower_right_long>=-180 & lower_right_long<=180)
Right_col= round(((lower_right_long-Xref)/cellsize)-0.5)+Xref_adj;
end
ncols=(Right_col-left_col)+1;
%PART 2
% Row indices of the requested latitude window (same caveat as above).
if (lower_left_lat>=-90 & lower_left_lat<=90)
lower_lat= Yref_adj+round((Yref-lower_left_lat)/cellsize);
end
if (upper_left_lat>=-90 & upper_left_lat<=90)
upper_lat= Yref_adj+1+round((Yref-upper_left_lat)/cellsize);
end
nrows = (lower_lat-upper_lat)+1;
% Period accumulators over the selected window: precipitation total and the
% per-pixel count of valid days.
Ptot=zeros(nrows,ncols);
Pcnt=zeros(nrows,ncols);
% Main accumulation loop: day -> hour -> half-hour record.
for day = dstart:dend
% Zero-pad the day-of-year to three characters for the file name.
% (Reassigning the loop variable is safe in MATLAB: it is reset each pass.)
if (day<10)
day = strcat(['0','0',num2str(day)]);
elseif (day<100)
day = strcat(['0',num2str(day)]);
else day = num2str(day);
day =[day];
end
%_____________________________________________________________________
% Daily accumulators: precipitation sum and count of valid half-hours.
Ptot_dly=zeros(nrows,ncols);
Pcnt_dly=zeros(nrows,ncols);
for hour=hstart:hend
if (hour<10)
hh = strcat(['0', num2str(hour)]);
else hh = num2str(hour);
end
%_____________________________________________________________________
Ptot_hrly=zeros(nrows,ncols);
% Records exist at minutes 15 and 45 (15:30:45 yields [15 45]).
for minutes=15:30:45
%for minutes=mstart:mend
if (minutes<10)
mm = strcat(['0', num2str(minutes)]);
else mm = num2str(minutes);
end
%_______________________________________________________________________
%_______________________________________________________________________
%_______________________________________________________________________
yrdayhhmm = strcat(yr,day,hh,mm);
%________________________________________________________________________
%str = [ '!gunzip -c ' RDIR 'rgccs' yrdayhhmm '.bin.gz > /tmp/rgccs' yrdayhhmm '.bin' ];
%eval(str);
% Decompress the record into the working directory, read it, remove it.
str = [ '!gunzip -c ' RDIR 'rgccs' yrdayhhmm '.bin.gz > rgccs' yrdayhhmm '.bin' ];
eval(str);
%_________________________________________________________________________
fid=fopen(['rgccs' yrdayhhmm '.bin'],'r','b');
%if fid>=0
pers_mat1=fread(fid,[9000 3000],'short','ieee-le');
%pers_mat1=fread(fid,[9000 3000],'short','ieee-be');
% Stored values are scaled by 100.
pers_mat = pers_mat1/100;
fclose(fid);
str = [ '!rm rgccs' yrdayhhmm '.bin' ];
eval(str);
% Negative values flag missing data.
Ind = find(pers_mat<0);
pers_mat(Ind)=nan;
p = (pers_mat)';
% Swap the halves so columns run from -180 to 180 degrees longitude.
a = [p(:,4501:9000) p(:,1:4500)];
a;
%end
%_____________________________________________________________________
% Clip to the selected window; 0.5* presumably converts the half-hourly
% rate into a depth over 30 minutes -- TODO confirm units.
m= [a(upper_lat:lower_lat,left_col:Right_col)];
m = 0.5*m;
%save m m -ascii;
%save(strcat([file,yr,day,hh,mm,'.asc']),'m','-ascii');
%_________________________________________________________________________
% nanid marks valid pixels; missing values contribute 0 to the sums.
nanid=~isnan(m);
m(find(isnan(m)==1))=0;
%m=nanid.*m;
Ptot_hrly=Ptot_hrly+m;
Ptot_dly=Ptot_dly+m;
Pcnt_dly=Pcnt_dly+nanid;
%___________________________________________________________________________
end%minutes
save(strcat([file3,yr,day,hh,'.asc']),'Ptot_hrly','-ascii');
end%hrly
save(strcat([file1,yr,day,'.asc']),'Pcnt_dly','-ascii');
% Scale the daily sum by valid-record coverage, then blank out pixels with
% fewer than hthr valid half-hours.
Ptot_dly=Ptot_dly./Pcnt_dly*(hend-hstart+1);
ind=find(Pcnt_dly<hthr);
Ptot_dly(ind)=NaN;
nanid=~isnan(Ptot_dly);
Ptot_dly(find(isnan(Ptot_dly)==1))=0;
save(strcat([file,yr,day,'.asc']),'Ptot_dly','-ascii');
%Ptot_dly=nanid.*Ptot_dly;
%Ptot=zeros(nrows,ncols);
%Pcnt=zeros(nrows,ncols);
% Fold the day into the period totals; Pcnt counts valid days per pixel.
Ptot=Ptot+Ptot_dly;
Pcnt=Pcnt+nanid;
end%day
% Period totals: scale by valid-day coverage, blank pixels with fewer than
% dthr valid days, save the grids and render a bitmap.
Total_accumulated_days = dend-dstart+1;
Total_accumulated_days = num2str(Total_accumulated_days);
save(strcat([file2,yr,Total_accumulated_days,'.asc']),'Pcnt','-ascii');
Ptot=Ptot./Pcnt*(dend-dstart+1);
ind=find(Pcnt<dthr);
Ptot(ind)=NaN;
save(strcat([file,yr,Total_accumulated_days,'.asc']),'Ptot','-ascii');
imagesc(Ptot);
colorbar
% %read country boundary
% fid=fopen('country.bin','r','b');
% coun_bin=fread(fid,[2 478870/2],'float');
% country = (coun_bin)';
% Ind = find(country<-400);
% country(Ind)=nan;
% country;
% %plotting
% xx = (country(:,1)*25)+4500.5;
% yy = (country(:,2)*-25)+1500.5;
% xy = [xx yy];
% figure;
% imagesc(Ptot);
% hold on;
% plot(xy(:,1),xy(:,2),'w');
% xlabel('longitude');
% ylabel('Latitude');
% %title('Trend of snow water equivalent')
% vec1 = [1000 1000 1000 1000 1000 1000 1000 1000 1000];
% vec2 = [0 vec1];
% vec3 = cumsum(vec2);
% vectick=[' 0'; ' 40'; ' 80'; '120'; '160'; '200'; '240'; '280'; '320'; '360'];
% set(gca,'Xtick',vec3);
% set(gca,'XtickLabel',vectick);
% %_____________________________________________________________________
% vec4 = [500 500 500 500 500 500];
% vec5 = [0 vec4];
% vec6 = cumsum(vec5);
% vectick=['60'; '40'; '20'; ' 0'; '20'; '40'; '60'];
% set(gca,'Ytick',vec6);
% set(gca,'YtickLabel',vectick);
% %_________________________________________________________________________
% %k = strcat([file,yr,day,hh,'.jpeg']);
% Export the current figure as a bitmap named after the period total.
k = strcat([file,yr,Total_accumulated_days,'.bmp']);
%print('-djpeg',k);
print('-dbmp',k);
colorbar
Executing below mentioned codes on the downloaded .bin files will give you below mentioned output.
% this program is for training of CHRS graduate created by Prashant Kuranjekar
% last edit: 03/12/2007
% Usage: follow the hint in the screen and enjoy!
% Notice: You NEED yyyymmdd2yydoy.m to run me!
clear all;
%close all;
% read data
NDIR = '/mnt/data01/goesdata/globccs/nesdis/';
yyyy = input('Please input the year (format: yyyy):');
mm = input('Please input the month (format: mm):');
ddsta = input('Please input the start day (format: dd):');
hrsta = input('Please input the start hour (format: hr):');
mnsta = input('Please input the start minute (format: mn):');
rdcnt = input('Please input the number of records (48 for a day):');
lgtlw = input('Please input the lower boundary of longitude degree ("-" for West):');
lgtup = input('Please input the upper boundary of longitude degree ("-" for West):');
lttlw = input('Please input the lower boundary of latitude degree ("-" for North):');
lttup = input('Please input the upper boundary of latitude degree ("-" for North):');
[col_l,col_u]=mapping(lgtlw,lgtup,-180,180,1,9000);
[row_l,row_u]=mapping(lttlw,lttup,-60,60,1,3000);
col=col_l:1:col_u;
row=row_l:1:row_u;
mask=nan(3000,9000);
mask(row,col)=1;
% Build human-readable hemisphere labels for the selected boundary.
% Longitude: negative input means West ('W'), otherwise East ('E').
if lgtlw<0
    strlgtlw=sprintf('W%.3f',0-lgtlw);
else
    strlgtlw=sprintf('E%.3f',lgtlw);
end
if lgtup<0
    strlgtup=sprintf('W%.3f',0-lgtup);
else
    strlgtup=sprintf('E%.3f',lgtup);
end
% Latitude: per the input prompts, negative input means North ('N'),
% otherwise South ('S').
if lttlw<0
    strlttlw=sprintf('N%.3f',0-lttlw);
else
    % bug fix: the original assigned this to strlgtlw, leaving strlttlw
    % undefined (and clobbering the longitude label) for lttlw >= 0.
    strlttlw=sprintf('S%.3f',lttlw);
end
if lttup<0
    % bug fix: the original used 'W'/'E' here; the upper latitude bound must
    % use the same 'N'/'S' convention as the lower bound.
    strlttup=sprintf('N%.3f',0-lttup);
else
    strlttup=sprintf('S%.3f',lttup);
end
disp(['The selected boundary is ',strlgtlw,'~',strlgtup,', ',strlttlw,'~',strlttup]);
disp(['Relative zone in the matrix is row:',num2str(row_l),'~',num2str(row_u),', column:',num2str(col_l),'~',num2str(col_u)]);
%initialize
totp = zeros(3000,9000);  % accumulated precipitation (missing treated as 0)
cnt = 0;                  % per-pixel count of valid (non-missing) records
dd = ddsta;
hr = hrsta;
mn = mnsta;
scrsz = get(0,'ScreenSize');
figure1 = figure('Position',[1 scrsz(4)/2 (scrsz(3)-2)/2 scrsz(4)/2],'Name','[I] Changing Process of average data','NumberTitle','off');
figure2 = figure('Position',[scrsz(3)/2 scrsz(4)/2 (scrsz(3)-2)/2 scrsz(4)/2],'Name','[II] Changing Process of actural data','NumberTitle','off');
% bug fix: the original wrote zeros(3000:9000:rdcnt), which passes a colon
% range (usually empty, since rdcnt < 3000) instead of allocating the
% intended 3000 x 9000 x rdcnt storage for the per-record fields.
nperseries = zeros(3000,9000,rdcnt);
% data read and process
for rdnum = 1:1:rdcnt
yydoy = yyyymmdd2yydoy(yyyy,mm,dd);
shr = sprintf('%02d',hr);
smn = sprintf('%02d',mn);
gzfilename = [NDIR, 'rgccs', yydoy, shr, smn, '.bin.gz'];
filename = ['rgccs', yydoy, shr, smn, '.bin'];
disp(['Reading Data in ', filename,' ......']);
unix(['gunzip -c ', gzfilename, ' > ', filename]);
fid = fopen(filename, 'r', 'b');
npers = fread(fid, [9000 3000], 'int16', 'ieee-le');
fclose(fid);
npers = (npers/100)';
npers2 = npers;
npers3 = ones(3000,9000);
npers(find(npers<0)) = nan;
npers2(find(npers2<0)) = 0;
npers3(isnan(npers)) = 0;
nperseries(:,:,rdnum)=npers;
totp = totp+npers2;
cnt = cnt+npers3;
disp(['Read Data in ', filename, ' sucessfully!']);
unix(['rm ',filename]);
if mn == 15
mn = 45;
else
mn = 15;
hr = hr+1;
if hr == 24
hr = 00;
dd = dd+1;
end
end
% mean
avep=totp./cnt;
avep_selected=avep.*mask;
npers_selected=npers.*mask;
% plot daily data & mean
figure(figure1);
imagesc(avep_selected);
colorbar;
figure(figure2);
imagesc(npers_selected);
colorbar;
end
IsRepeat = input('Do you want to see the simulation of areal precipitaion changing process (Y/N):','s');
if IsRepeat == 'Y' || IsRepeat == 'y'
disp('The animation is shown in figure [II], press "ctrl+c" to exit!')
rptcnt = 0;
while 1
rdnum = rem(rptcnt,rdcnt)+1;
figure(figure2);
imagesc(nperseries(:,:,rdnum));
colorbar;
pause(0.5);
rptcnt = rptcnt+1;
end
else
close all;
disp('===================End of persiann simulation @Prashant Kuranjekar. 2007===================');
end
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
clear all;
RDIR = ['/mnt/data01/goesdata/globccs/nesdis/'];
%RDIR = ['/mnt/data01/goesdata/globccs/rapids/'];
%RDIR = ['/mnt/dans/goesdata/globccs/merge/out_post/'];
RES = 0.04;
%nesdis
hthr=20;
%dthr=8;
dthr=7;
%Rapids
%hthr=40;
%dthr=10;
%Ask for the input
% file = Erin;
% file3 = Ptot_hrly;
% file1 = Pcnt_dly;
% file2 = Pcnt;
%
% year = 2007;
% dstart= 221;
% dend = 232;
% hstart= 0;
% hend = 23;
%________________________________________________________________________
% lower_left_long = -110;
% lower_right_long = -85;
% lower_left_lat = 25;
% upper_left_lat = 38;
%Ask for the input
file = input('Enter the file\n','s');
file3 = input('Enter the filename for Ptot_hrly\n','s');
file1 = input('Enter the filename for Pcnt_dly\n','s');
file2 = input('Enter the filename for Pcnt\n','s');
year = input('Enter the year\n');
dstart= input('Enter the start day\n');
dend = input('Enter the end day\n');
hstart= input('Enter the start hour\n');
hend = input('Enter the end hour\n');
% %________________________________________________________________________
lower_left_long = input('Enter the lower left longitude\n');
lower_right_long = input('Enter the lower right longitude\n');
lower_left_lat = input('Enter the lower left latitude\n');
upper_left_lat = input('Enter the upper left latitude\n');
%________________________________________________________________________
% Convert the numeric year to the 2-digit string used in the file names.
% bug fix: the original used four independent "if" statements, where
% "if 2000<year<2010" evaluates as "(2000<year)<2010" (always true in
% MATLAB), and later conditions compared a string against a number after
% "year" had already been overwritten with a string. As a result years
% before 2000 produced garbage and years >= 2010 were mislabeled '0NN'.
% A proper if/elseif chain evaluates exactly one branch.
if year<2000
    year = num2str(year-1900);        % e.g. 1995 -> '95'
elseif year<2010
    year = strcat(['0',num2str(year-2000)]);  % e.g. 2007 -> '07'
else
    year = num2str(year-2000);        % e.g. 2015 -> '15'
end
yr=[year];
cellsize = 0.04;
ROWS = 3000;
COLS = 9000;
nodata_value = -99;
Xref = 0.02;
Yref = 59.8;
X = 0;
Y = 0;
Xref_adj = round(((Xref-X)/cellsize)+0.5)+4500;
Yref_adj = 1500-round((Yref-Y)/cellsize);
%______________________________________________________________________
%PART 1
if (lower_left_long>=-180 & lower_left_long<=180)
left_col= round(((lower_left_long-Xref)/cellsize)+0.5)+Xref_adj;
end
if (lower_right_long>=-180 & lower_right_long<=180)
Right_col= round(((lower_right_long-Xref)/cellsize)-0.5)+Xref_adj;
end
ncols=(Right_col-left_col)+1;
%PART 2
if (lower_left_lat>=-90 & lower_left_lat<=90)
lower_lat= Yref_adj+round((Yref-lower_left_lat)/cellsize);
end
if (upper_left_lat>=-90 & upper_left_lat<=90)
upper_lat= Yref_adj+1+round((Yref-upper_left_lat)/cellsize);
end
nrows = (lower_lat-upper_lat)+1;
Ptot=zeros(nrows,ncols);
Pcnt=zeros(nrows,ncols);
%___________________________________________________________________
for day = dstart:dend
if (day<10)
day = strcat(['0','0',num2str(day)]);
elseif (day<100)
day = strcat(['0',num2str(day)]);
else day = num2str(day);
day =[day];
end
%_____________________________________________________________________
Ptot_dly=zeros(nrows,ncols);
Pcnt_dly=zeros(nrows,ncols);
for hour=hstart:hend
if (hour<10)
hh = strcat(['0', num2str(hour)]);
else hh = num2str(hour);
end
%_____________________________________________________________________
Ptot_hrly=zeros(nrows,ncols);
for minutes=15:30:45
%for minutes=mstart:mend
if (minutes<10)
mm = strcat(['0', num2str(minutes)]);
else mm = num2str(minutes);
end
%_______________________________________________________________________
%_______________________________________________________________________
%_______________________________________________________________________
yrdayhhmm = strcat(yr,day,hh,mm);
%________________________________________________________________________
%str = [ '!gunzip -c ' RDIR 'rgccs' yrdayhhmm '.bin.gz > /tmp/rgccs' yrdayhhmm '.bin' ];
%eval(str);
str = [ '!gunzip -c ' RDIR 'rgccs' yrdayhhmm '.bin.gz > rgccs' yrdayhhmm '.bin' ];
eval(str);
%_________________________________________________________________________
fid=fopen(['rgccs' yrdayhhmm '.bin'],'r','b');
%if fid>=0
pers_mat1=fread(fid,[9000 3000],'short','ieee-le');
%pers_mat1=fread(fid,[9000 3000],'short','ieee-be');
pers_mat = pers_mat1/100;
fclose(fid);
str = [ '!rm rgccs' yrdayhhmm '.bin' ];
eval(str);
Ind = find(pers_mat<0);
pers_mat(Ind)=nan;
p = (pers_mat)';
a = [p(:,4501:9000) p(:,1:4500)];
a;
%end
%_____________________________________________________________________
m= [a(upper_lat:lower_lat,left_col:Right_col)];
m = 0.5*m;
%save m m -ascii;
%save(strcat([file,yr,day,hh,mm,'.asc']),'m','-ascii');
%_________________________________________________________________________
nanid=~isnan(m);
m(find(isnan(m)==1))=0;
%m=nanid.*m;
Ptot_hrly=Ptot_hrly+m;
Ptot_dly=Ptot_dly+m;
Pcnt_dly=Pcnt_dly+nanid;
%___________________________________________________________________________
end%minutes
save(strcat([file3,yr,day,hh,'.asc']),'Ptot_hrly','-ascii');
end%hrly
save(strcat([file1,yr,day,'.asc']),'Pcnt_dly','-ascii');
Ptot_dly=Ptot_dly./Pcnt_dly*(hend-hstart+1);
ind=find(Pcnt_dly<hthr);
Ptot_dly(ind)=NaN;
nanid=~isnan(Ptot_dly);
Ptot_dly(find(isnan(Ptot_dly)==1))=0;
save(strcat([file,yr,day,'.asc']),'Ptot_dly','-ascii');
%Ptot_dly=nanid.*Ptot_dly;
%Ptot=zeros(nrows,ncols);
%Pcnt=zeros(nrows,ncols);
Ptot=Ptot+Ptot_dly;
Pcnt=Pcnt+nanid;
end%day
Total_accumulated_days = dend-dstart+1;
Total_accumulated_days = num2str(Total_accumulated_days);
save(strcat([file2,yr,Total_accumulated_days,'.asc']),'Pcnt','-ascii');
Ptot=Ptot./Pcnt*(dend-dstart+1);
ind=find(Pcnt<dthr);
Ptot(ind)=NaN;
save(strcat([file,yr,Total_accumulated_days,'.asc']),'Ptot','-ascii');
imagesc(Ptot);
colorbar
% %read country boundary
% fid=fopen('country.bin','r','b');
% coun_bin=fread(fid,[2 478870/2],'float');
% country = (coun_bin)';
% Ind = find(country<-400);
% country(Ind)=nan;
% country;
% %plotting
% xx = (country(:,1)*25)+4500.5;
% yy = (country(:,2)*-25)+1500.5;
% xy = [xx yy];
% figure;
% imagesc(Ptot);
% hold on;
% plot(xy(:,1),xy(:,2),'w');
% xlabel('longitude');
% ylabel('Latitude');
% %title('Trend of snow water equivalent')
% vec1 = [1000 1000 1000 1000 1000 1000 1000 1000 1000];
% vec2 = [0 vec1];
% vec3 = cumsum(vec2);
% vectick=[' 0'; ' 40'; ' 80'; '120'; '160'; '200'; '240'; '280'; '320'; '360'];
% set(gca,'Xtick',vec3);
% set(gca,'XtickLabel',vectick);
% %_____________________________________________________________________
% vec4 = [500 500 500 500 500 500];
% vec5 = [0 vec4];
% vec6 = cumsum(vec5);
% vectick=['60'; '40'; '20'; ' 0'; '20'; '40'; '60'];
% set(gca,'Ytick',vec6);
% set(gca,'YtickLabel',vectick);
% %_________________________________________________________________________
% %k = strcat([file,yr,day,hh,'.jpeg']);
k = strcat([file,yr,Total_accumulated_days,'.bmp']);
%print('-djpeg',k);
print('-dbmp',k);
colorbar
Monday, April 6, 2015
VB - Get the DTHETA value for each of the grid cell
This is VB code inserted into a Manifold program. The task is to compute a DTHETA value to assign to each grid cell according to its soil condition. The area-weighted average DTHETA value for each grid cell is computed as well.
' Compute DTHETA (moisture deficit) for the current grid-cell record from its
' initial saturation class ("Dry"/"Normal"/other) and its average hydraulic
' conductivity (AvgXKSAT), via piecewise log-linear fits.
' NOTE(review): the fit constants presumably come from a local drainage design
' manual (Green-Ampt parameters) -- confirm the source before editing them.
Function DTHETA
' Initial saturation condition of the cell.
Sat = Record.Data("InitSat")
' NOTE(review): DataText returns a string; the numeric comparisons below rely
' on implicit string-to-number coercion -- confirm, or read via Record.Data.
XKSAT = Record.DataText("AvgXKSAT")
If Sat = "Dry" Then
If XKSAT <= 0.15 Then
DTHETA = EXP(-0.2394+(0.3616*log(XKSAT)))
ElseIf XKSAT <= 0.25 Then
DTHETA = EXP(-1.4122-(0.2614*log(XKSAT)))
Else
DTHETA = 0.35
End If
ElseIf Sat = "Normal" Then
If XKSAT <= 0.02 Then
DTHETA = EXP(1.6094+log(XKSAT))
ElseIf XKSAT <= 0.04 Then
DTHETA = EXP(-0.0142+(0.5850*log(XKSAT)))
ElseIf XKSAT <= 0.1 Then
DTHETA = 0.15
ElseIf XKSAT <= 0.15 Then
DTHETA = EXP(1.0038+(1.2599*log(XKSAT)))
ElseIf XKSAT <= 0.4 Then
DTHETA = 0.25
Else
DTHETA = EXP(-1.2342+(0.1660*log(XKSAT)))
End If
Else
' Any other saturation class (e.g. saturated) contributes no deficit.
DTHETA = 0
End If
End Function
Function ADTHETA
' Area-weighted DTHETA contribution for the current record: the record's
' DTHETA value scaled by the record's area.
ADTHETA = Record.Data("DTHETA") * Record.Data("Area (I)")
End Function
Function WDTHETA
' Weighted DTHETA for the current record: the accumulated area-weighted
' value divided back by the record's area.
WDTHETA = Record.Data("ADTHETA") / Record.Data("Area (I)")
End Function
Thursday, April 2, 2015
R - Transpose a Matrix
Let's say that we need to arrange a Hydrograph output in such a way that we can insert that results into HEC-1 input file.
The structure of hydrograph output file is 721 (Rows) by 1 (Columns)
In order to input the result into HEC-1, I need to arrange it as n (rows) by 10 (columns).
Here is the R code..
# Reshape a 721 x 1 hydrograph output file into rows of 10 values so the
# result can be pasted into a HEC-1 input file.
workDir <- "C:/HEC-1 Installation/HEC1EXE/"
# Read the single-column hydrograph values (no header). Using file.path()
# instead of setwd() avoids changing the session's working directory while
# reading/writing exactly the same files as before.
inData <- read.csv(file.path(workDir, "100yr24hr PLR 107cs.txt"),
                   header = FALSE, sep = ",", as.is = TRUE)
#Mat1 <- matrix(inData$V1, 721, 10, byrow = T)
# Fill row-wise, 10 values per row. (byrow = TRUE, not the reassignable T.)
# NOTE: when the value count is not a multiple of 10, matrix() recycles
# values into the final row and emits a warning -- pad the input if exact
# output is required.
Mat1 <- matrix(inData$V1, ncol = 10, byrow = TRUE)
write.table(Mat1, file.path(workDir, "100yr24hr PLR 107cs Transpose.txt"),
            sep = "\t", col.names = FALSE, row.names = FALSE)
The resultant file will be
Python (ArcPy) - Get rid of suffixes from multiple shapefiles in a folder
Let's say that I have several shapefiles in a folder and I would like to remove the suffixes from each shapefile and overwrite the previous files.
For Example:
Here is the Python (ArcPy) code..
# Strip known suffixes (e.g. "_AREAS", "_LINES") from every shapefile name in
# a folder by renaming in place, preserving the original letter case.
# NOTE(review): indentation below appears to have been lost in transcription;
# bodies should be indented under their for/if statements.
import os, sys, arcpy
#InFolder = sys.argv[1] # make this a hard set path if you like
InFolder = "I:\\python\\GIS-data-Management\\Globalmapper" # make this a hard set path if you like
arcpy.env.workspace = InFolder
CapitalKeywordsToRemove = ["_AREAS","_LINES"]# MUST BE CAPITALS
DS_List = arcpy.ListFeatureClasses("*.shp","ALL") # Get a list of the feature classes (shapefiles)
for ThisDS in DS_List:
NewName = ThisDS # set the new name to the old name
# replace key words, searching is case sensitive but I'm trying not to change the case
# of the name so the new name is put together from the original name with searching in
# upper case, use as many key words as you like
for CapKeyWord in CapitalKeywordsToRemove:
if CapKeyWord in NewName.upper():
# remove the instance of CapKeyWord without changing the case
NewName = NewName[0:NewName.upper().find(CapKeyWord)] + NewName[NewName.upper().find(CapKeyWord) + len(CapKeyWord):]
# Only rename when something changed and the target name is free.
if NewName != ThisDS:
if not arcpy.Exists(NewName):
arcpy.AddMessage("Renaming " + ThisDS + " to " + NewName)
arcpy.Rename_management(ThisDS , NewName)
else:
arcpy.AddWarning("Cannot rename, " + NewName + " already exists")
else:
arcpy.AddMessage("Retaining " + ThisDS)
Tuesday, March 31, 2015
Python (ArcPy) - Create multiple geodatabase from the list
Let's say that I would like to create multiple geodatabase in a folder.
Here is the Python (ArcPy) code is..
# Create one file geodatabase per name in fdList inside the target folder.
# NOTE(review): the loop body's indentation was lost in transcription.
import arcpy
from arcpy import env
env.workspace = "I:/python/MultipleFD2GDB"
fdList = ["EMI_EMF", "Cultural_Resources", "Parcels", "Hazardous_Materials", "Footprint", "Checkpoint_B", "Wetlands", "Botany", "Land_Use", "Buffers", "Air_Quality", "Transportation", "Topography", "Aesthetics_VisualQuality", "Socioeconomics", "Noise_and_Vibe", "Safety_and_Security", "Alignments", "Annotation", "Mapbook_Grids", "Biology", "Geology", "Public_Utilities", "Agriculture", "Public_Lands", "Hydrology", "BSA", "Overview", "Cumulative", "Alternatives_Analysis", "Permission_to_Enter", "Public_Affairs", "Right_of_Way", "Wells", "Wind_Energy"]
for fd in fdList:
arcpy.CreateFileGDB_management("I:/python/MultipleFD2GDB", fd)
Python (ArcPy) - Create multiple FD (Feature Datasets) in a Single GDB (Geodatabase)
Let's say I would like to create several feature datasets in a single geodatabase with a known FD name.
This is how the GDB structure will going to be ....
Here is the Python (ArcPy) code..
# Create a single file geodatabase, then one feature dataset per name in
# fdList inside it, all using the same projection file (2229.prj).
# NOTE(review): the loop body's indentation was lost in transcription.
import arcpy
from arcpy import env
env.workspace = "I:/python/MultipleFD2GDB"
arcpy.CreateFileGDB_management("I:/python/MultipleFD2GDB", "HabitatAnalysis.gdb")
fdList = ["EMI_EMF", "Cultural_Resources", "Parcels", "Hazardous_Materials", "Footprint", "Checkpoint_B", "Wetlands", "Botany", "Land_Use", "Buffers", "Air_Quality", "Transportation", "Topography", "Aesthetics_VisualQuality", "Socioeconomics", "Noise_and_Vibe", "Safety_and_Security", "Alignments", "Annotation", "Mapbook_Grids", "Biology", "Geology", "Public_Utilities", "Agriculture", "Public_Lands", "Hydrology", "BSA", "Overview", "Cumulative", "Alternatives_Analysis", "Permission_to_Enter", "Public_Affairs", "Right_of_Way", "Wells", "Wind_Energy"]
for fd in fdList:
arcpy.CreateFeatureDataset_management("I:/python/MultipleFD2GDB/HabitatAnalysis.gdb", fd, "I:/python/MultipleFD2GDB/2229.prj")
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Let's say I would like to create several feature datasets from a list present in a separate Geodatabase.
# Create a file geodatabase, then one feature dataset for each name stored in
# a table ('fds_name') of a separate geodatabase, read via a search cursor.
# NOTE(review): the loop body's indentation was lost in transcription.
import arcpy
from arcpy import env
env.workspace = "I:/python/MultipleFD2GDB"
arcpy.CreateFileGDB_management("I:/python/MultipleFD2GDB", "HabitatAnalysis.gdb")
#create a search cursor
rows = arcpy.SearchCursor(r'I:\python\MultipleFD2GDB\HabitatAnalysis1.gdb\fds_name')
#loop through the rows in the cursor to get the name from the field and then create the fds
for row in rows:
# NOTE(review): presumably the table's field is literally named
# 'field_fds_name'; if it is 'fds_name', use row.getValue('fds_name').
outfds = row.field_fds_name
arcpy.CreateFeatureDataset_management("I:/python/MultipleFD2GDB/HabitatAnalysis.gdb", outfds, "I:/python/MultipleFD2GDB/2229.prj")
JavaScript Web MAP API
Working on a Web map API on a California High Speed Rail Project
Web Map API
http://arcgis.tylin.com
Python (ArcPy) - Processing Multiple Breakline files in a folder
Let's say I get multiple breakline
files from a CAD person in below mentioned format. So I am calling this as a “BreaklineInput1.txt, BreaklineInput2.txt, BreaklineInput3.txt, BreaklineInput4.txt” files.
The sample input file can be downloaded from the below mentioned link.
The file format looks like this one
All the files needs to get converted into the below mentioned format
The output file will going to look like this one in a folder.
Here is the Python (ArcPy) code..
import os, sys


def _convert_breakline_file(in_path, out_path):
    """Convert one CAD breakline text file to the ArcGIS breakline format.

    Input rows look like "x y z flag"; rows starting with ";" are headers.
    A trailing flag of 1 starts a new feature. Each feature is emitted as a
    "3" header line, its "x y z" coordinate lines, then an "END" line.

    Improvements over the original flat script: blank lines are skipped
    (the original raised IndexError on them), the duplicated coordinate
    write in both branches is merged, and the EOF test uses file iteration
    instead of the redundant "row is not None" readline loop.
    """
    first_feature = True
    with open(in_path, 'r') as in_f, open(out_path, 'w') as out_f:
        for raw in in_f:
            # Ignore the headers, specified with ";".
            if raw.startswith(";"):
                continue
            row = raw.strip()
            if not row:
                continue  # skip blank lines instead of crashing
            # Normalise the space-delimited row to comma-separated tokens.
            # NOTE(review): the original chain's multi-space handling may have
            # been mangled in transcription -- confirm against real input.
            row = row.replace(" ", ",").replace(" ", "").replace(" ", ",")
            tokens = row.split(",")
            # If the last parameter is 1, start writing out a new feature.
            if tokens[3] == '1':
                if not first_feature:
                    out_f.write("END\n")
                first_feature = False
                out_f.write('3\n')
            # Write out the coordinates (for both new and continuing features).
            out_f.write(tokens[0] + " " + tokens[1] + " " + tokens[2] + "\n")
        # Close off the final feature.
        out_f.write("END\n")


def process_breakline_folder(in_dir):
    """Convert every .txt file in in_dir, writing <name>_output.txt beside it."""
    for in_file in os.listdir(in_dir):
        if in_file.endswith(".txt"):
            # Name the output after the input with _output appended.
            out_file = in_file.replace(".txt", "_output.txt")
            _convert_breakline_file(os.path.join(in_dir, in_file),
                                    os.path.join(in_dir, out_file))


if __name__ == '__main__':
    # Directory holding the input files (original hard-coded path).
    inDir = r'J:\Thumb drive\Technical\GIS Exercise\Chapters\Chapter 14 - R by example in Statistics & Geospatial Analysis\R\reneedyourhelpmann\Incomplete\Breakline\Multiple Breakline'
    process_breakline_folder(inDir)
    print("Finished")
Python (ArcPy) - Processing Single Breakline file
I get a breakline file from a CAD person in below mentioned
format. So I am calling this as a “Input” file. The input file can be
downloaded from the below mentioned link.
In order to accept a breakline into ArcGIS, we need to change it to the below-mentioned format.
Here is the Python (ArcPy) Code....
import os, sys


def convert_breakline(in_path, out_path):
    """Convert a CAD breakline text file to the ArcGIS breakline format.

    Input rows look like "x y z flag"; rows starting with ";" are headers.
    A trailing flag of 1 starts a new feature. Each feature is emitted as a
    "3" header line, its "x y z" coordinate lines, then an "END" line.

    Improvements over the original flat script: blank lines are skipped
    (the original raised IndexError on them), the duplicated coordinate
    write in both branches is merged, and the EOF test uses file iteration
    instead of the redundant "row is not None" readline loop.
    """
    first_feature = True
    with open(in_path, 'r') as in_f, open(out_path, 'w') as out_f:
        for raw in in_f:
            # Ignore the headers, specified with ";".
            if raw.startswith(";"):
                continue
            row = raw.strip()
            if not row:
                continue  # skip blank lines instead of crashing
            # Normalise the space-delimited row to comma-separated tokens.
            # NOTE(review): the original chain's multi-space handling may have
            # been mangled in transcription -- confirm against real input.
            row = row.replace(" ", ",").replace(" ", "").replace(" ", ",")
            tokens = row.split(",")
            # If the last parameter is 1, start writing out a new feature.
            if tokens[3] == '1':
                if not first_feature:
                    out_f.write("END\n")
                first_feature = False
                out_f.write('3\n')
            # Write out the coordinates (for both new and continuing features).
            out_f.write(tokens[0] + " " + tokens[1] + " " + tokens[2] + "\n")
        # Close off the final feature.
        out_f.write("END\n")


if __name__ == '__main__':
    # Original hard-coded input/output paths.
    inFile = r'J:\Thumb drive\Technical\GIS Exercise\Chapters\Chapter 14 - R by example in Statistics & Geospatial Analysis\R\reneedyourhelpmann\Incomplete\Breakline\BreaklineInput.txt'
    outFile = r'J:\Thumb drive\Technical\GIS Exercise\Chapters\Chapter 14 - R by example in Statistics & Geospatial Analysis\R\reneedyourhelpmann\Incomplete\Breakline\BreaklineOutput.txt'
    convert_breakline(inFile, outFile)
    print("Finished")
Python (ArcPy) - gdb2TOC - Importing file geodatabase (GDB) to table of contents (TOC) of ArcMap?
I have a geodatabase with several feature datasets and feature classes. I would like to create an .mxd whose table of contents consists of group layers, each containing several layers. Each group layer's name should match its feature dataset's name, and the feature classes in each feature dataset should appear as layers within that particular group layer, as shown in the figure.
# gdb2TOC: build an ArcMap .mxd whose table of contents mirrors a file
# geodatabase -- one group layer per feature dataset, with each feature
# class added as a layer inside its dataset's group.
# NOTE(review): the loop-body indentation was lost when this script was
# pasted into the post; restore the nesting (walk loop -> dirs loop /
# files loop) before running it.
import os,arcpy
gdb=r'I:\python\GroupLayer\ABC.gdb'
template_mxd = r'I:\python\GroupLayer\empty.mxd'
template_group_layer = r'I:\python\GroupLayer\empty_group.lyr'
output_mxd = r'I:\python\GroupLayer\not_empty.mxd'
# Start from an empty template map and use its first data frame.
mxd=arcpy.mapping.MapDocument(template_mxd)
df = arcpy.mapping.ListDataFrames(mxd)[0]
# Maps feature-dataset name -> the group layer created for it.
groups={}
for path, dirs, files in arcpy.da.Walk(gdb):
#Any "folder" in a GDB is a Feature Dataset
#and there can only be a single level
for d in dirs:
#Add an empty group layer to ArcMap
lyr=arcpy.mapping.Layer(template_group_layer)
lyr.name=d
arcpy.mapping.AddLayer(df,lyr)
# Re-fetch the layer from the map so later AddLayerToGroup calls
# target the instance that actually lives in the TOC.
groups[d]=arcpy.mapping.ListLayers(mxd,d,df)[0]
for f in files:
fp=os.path.join(path,f)
dsc=arcpy.Describe(fp)
lyr=None
view=None
# Pick the layer/view factory that matches the dataset type.
if dsc.dataType == 'FeatureClass':
lyr=arcpy.management.MakeFeatureLayer(fp)[0]
elif dsc.dataType == 'RasterDataset':
lyr=arcpy.management.MakeRasterLayer(fp)[0]
elif dsc.dataType == 'Table':
view=arcpy.management.MakeTableView(fp)[0]
else:continue
# Items at the GDB root go straight into the TOC (hidden); items inside
# a feature dataset go into that dataset's group layer.
if path==gdb and lyr:
lyr.visible=False
arcpy.mapping.AddLayer(df,lyr)
elif view:
arcpy.mapping.AddTableView(df, view)
else:
d=os.path.basename(path)
arcpy.mapping.AddLayerToGroup(df, groups[d], lyr, "BOTTOM")
# Save to a new .mxd, leaving the template untouched.
mxd.saveACopy(output_mxd)
The result in TOC will be...
Python (ArcPy) - fld2gdb - Exporting Folder of shapefiles to file geodatabase (GDB)?
I have multiple folders of shapefiles, with several shapefiles in each
folder. I want to create a geodatabase in which the feature dataset names
match the folder names. The shapefiles in each folder should be exported as
feature classes into the corresponding feature dataset, with each feature
class named after its shapefile.
Here is the Python (ArcPy) code....
import arcpy
import os
ShapefilesDirectory = r"I:\python\SampleData\Shapefiles"
arcpy.CreateFileGDB_management("I:/python/SampleData", "Shapefiles.gdb")
arcpy.env.workspace = ShapefilesDirectory
print "Building List of Folder of Shapefiles..."
fldList = arcpy.ListWorkspaces('*','Folder')
print str(fldList)
if len(fldList) == 0:
print "No folders with shapefiles found"
else:
for fld in fldList:
print fld
arcpy.CreateFeatureDataset_management("I:/python/SampleData/Shapefiles.gdb", fld, "I:/python/MultipleFD2GDB/2229.prj")
if os.path.exists(Shapefiles.gdb + "\\" + fld) == False:
print "Creating " + fld + " Directory..."
arcpy.CreateFolder_management(Shapefiles.gdb + "\\" + fld)
# fld2gdb (listing step): enumerate the sub-folders of the shapefile
# directory.  The CreateFileGDB call is commented out because the
# geodatabase was already created by a previous run.
# NOTE(review): Python 2 code; the if/else/for body indentation was lost
# when this was pasted into the post.
import arcpy
ShapefilesDirectory = r"I:\python\SampleData\Shapefiles"
# arcpy.CreateFileGDB_management("I:/python/SampleData", "Shapefiles.gdb")
arcpy.env.workspace = ShapefilesDirectory
print "Building List of Folder of Shapefiles..."
fldList = arcpy.ListWorkspaces('*','Folder')
print str(fldList)
# ListWorkspaces returns an empty list when no sub-folders exist.
if len(fldList) == 0:
print "No folders with shapefiles found"
else:
for fld in fldList:
print fld
Monday, March 30, 2015
Python (ArcPy) - gdb2fld - Exporting file geodatabase (GDB) to Folder of shapefiles
I have several geodatabases in a folder, each with several feature datasets,
and each feature dataset contains several feature classes. I would like to
create a folder for each geodatabase, export each feature dataset as a
sub-folder, and store the feature classes of each feature dataset as
shapefiles in the corresponding sub-folder, as shown in the figure.

Here is a Python (ArcPy) code..
import arcpy
import os
GDB = r"C:\Gdb2Shp\gdb2fld\Shapefiles.gdb"
ShapefilesDirectory = r"C:\Gdb2Shp\gdb2fld"
arcpy.env.workspace = GDB
print "Building List of Feature Datasets..."
FeatureDatasets = arcpy.ListDatasets("", "Feature")
if len(FeatureDatasets) == 0:
raise Exception("No FeatureDatasets found.")
print "Returned " + str(len(FeatureDatasets)) + " FeatureDatasets..."
for FeatureDataset in FeatureDatasets:
if os.path.exists(ShapefilesDirectory + "\\" + FeatureDataset) == False:
print "Creating " + FeatureDataset + " Directory..."
arcpy.os.makedirs(ShapefilesDirectory + "\\" + FeatureDataset)
outputDirectory = ShapefilesDirectory + "\\" + FeatureDataset
print "Building List of FeatureClasses..."
FeatureClasses = arcpy.ListFeatureClasses("", "", FeatureDataset)
print "Returned " + str(len(FeatureClasses)) + " FeatureClasses..."
for FeatureClass in FeatureClasses:
print "Exporting '" + FeatureClass + "' FeatureClass..."
arcpy.FeatureClassToShapefile_conversion(FeatureClass, outputDirectory)
print "Complete."
Here is a Python (ArcPy) code..
import arcpy
import os
GDB = r"C:\Gdb2Shp\gdb2fld\Shapefiles.gdb"
ShapefilesDirectory = r"C:\Gdb2Shp\gdb2fld"
arcpy.env.workspace = GDB
print "Building List of Feature Datasets..."
FeatureDatasets = arcpy.ListDatasets("", "Feature")
if len(FeatureDatasets) == 0:
raise Exception("No FeatureDatasets found.")
print "Returned " + str(len(FeatureDatasets)) + " FeatureDatasets..."
for FeatureDataset in FeatureDatasets:
if os.path.exists(ShapefilesDirectory + "\\" + FeatureDataset) == False:
print "Creating " + FeatureDataset + " Directory..."
arcpy.os.makedirs(ShapefilesDirectory + "\\" + FeatureDataset)
outputDirectory = ShapefilesDirectory + "\\" + FeatureDataset
print "Building List of FeatureClasses..."
FeatureClasses = arcpy.ListFeatureClasses("", "", FeatureDataset)
print "Returned " + str(len(FeatureClasses)) + " FeatureClasses..."
for FeatureClass in FeatureClasses:
print "Exporting '" + FeatureClass + "' FeatureClass..."
arcpy.FeatureClassToShapefile_conversion(FeatureClass, outputDirectory)
print "Complete."
R - Make a folder of each shapefile and zip it individually in a folder
Let's say there are several shapefiles in a folder. I would like to make a folder for each shapefile and zip each of those folders individually. This helps when uploading individual shapefiles to ArcGIS Online, which only accepts a zipped folder containing a single shapefile.
Here is the R code...
# Group each shapefile's component files into its own folder, then zip each
# folder individually so it can be uploaded to ArcGIS Online.
shp_file_names <- list.files("Shapefile zipped/", include.dirs = FALSE)
shp_uniq <- do.call("rbind", strsplit(shp_file_names, "\\."))[, 1]
shp_uniq <- unique(shp_uniq)
for (eachFile in shp_uniq) {
  dir.create(paste0("shape_unzip/", eachFile))
  # Anchor the pattern to the full base name so e.g. "road" does not also
  # match "road2.shp" (the original unanchored pattern did).
  shp_files <- list.files("Shapefile zipped/",
                          pattern = paste0("^", eachFile, "\\."))
  file.copy(paste0("Shapefile zipped/", shp_files),
            paste0("shape_unzip/", eachFile, "/", shp_files))
}
shp_folders <- list.files("shape_unzip/")
for (eachFolder in shp_folders) {
  # Fix: zip the shapefile's own folder.  The original command zipped "."
  # (the entire working directory) into every archive.  -j junks the
  # directory prefix so the files sit at the zip root, as ArcGIS Online
  # expects.
  zipCmd <- paste0("zip -r -j ", "shape_zip/", eachFolder, " ",
                   "shape_unzip/", eachFolder)
  system(zipCmd, invisible = TRUE, show.output.on.console = FALSE)
}
R - Selection of rows based on column criteria
Codes used to process LiDAR Original txt file.
Here you will see that the 5th column contains 1's and 2's. This code keeps only the rows with 2's; a value of 2 represents a ground point.
The sample file can be downloaded from the link below.
The text file has 1's and 2's in the 5th column
Here is the R code...
# Select only the ground points (class code 2 in column 5) of a LiDAR text
# export and write their x/y/z columns (V1:V3) to a new file.
# (Dropped the rm(list=ls()) line: wiping the caller's workspace is a
# well-known scripting anti-pattern.)
workDir <- "C:/Users/PKuranjekar/Desktop/Thumb drive/R/reneedyourhelpmann/"
setwd(workDir)
inData <- read.csv("Selection of rows2.txt", header = FALSE, sep = ",", as.is = TRUE)
# Column V5 is read as numeric (1 = non-ground, 2 = ground), so compare
# numerically instead of against the string "2".
outData <- subset(inData, inData$V5 == 2, select = V1:V3)
write.table(outData, "code2_output2.txt", sep = ",", col.names = FALSE, row.names = FALSE)
Sunday, March 29, 2015
R - Combine several LiDAR xyz point csv file horizontally
Let's say we have xyz points for several LiDAR tiles that are adjacent to each other vertically and horizontally.
Here is the R code...
# Combine the xyz point files named in DataList.txt (seeded with the first
# *.pts file found in the working directory) into one data frame and write
# it out as a single csv.
workDir <- "J:/Thumb drive/Technical/GIS Exercise/Chapters/Chapter 14 - R by example in Statistics & Geospatial Analysis/R/reneedyourhelpmann/Incomplete/Row bind points file as per the txt file/"
setwd(workDir)
( p <- list.files(getwd(), pattern="pts$") )
ptdf <- read.table(p[1], header=FALSE)
names(ptdf) <- c("x","y","z")
p <- read.table("DataList.txt")
( p <- as.character(p[,1]) )
# Read each remaining file once, then bind everything in a single do.call():
# growing a data frame with rbind() inside a loop copies it each pass
# (O(n^2)).  The length guard also avoids 2:length(p) evaluating to
# c(2, 1) when DataList.txt holds a single file name.
if (length(p) >= 2) {
  more <- lapply(p[-1], function(f) {
    d <- read.table(f, header = FALSE)
    names(d) <- c("x", "y", "z")
    d
  })
  ptdf <- do.call(rbind, c(list(ptdf), more))
}
write.csv(ptdf, "FileName.csv", row.names=FALSE, quote=FALSE)
You can even write out a point shapefile, if so desired.
# Optionally write the combined points out as a point shapefile.
# library() (not require()) so a missing package fails loudly.
library(sp)
library(rgdal)
# Fix: the combined data frame's coordinate columns are named "x" and "y"
# (lower case), so the coordinates formula must match that case exactly --
# the original ~X+Y referenced columns that do not exist.
coordinates(ptdf) <- ~x+y
writeOGR(ptdf, getwd(), "OutShape", driver="ESRI Shapefile")
Python (ArcPy) - Find Non-Matching rows in-between 2 text files
Let's say we have generated two csv files from different runs, as described at the link above.
This will create two output files; each lists the rows (GDB, FD, FC) that are missing from one file when compared against the other, one comparison at a time.
Here is the Python (ArcPy) code....
import os

def find_unique_rows(file_a, file_b):
    """Compare two text files line by line.

    Returns a tuple (unique_to_a, unique_to_b): the lines (newlines
    included) that appear in one file but not in the other.  Duplicate
    lines within file A are collapsed, matching the dictionary-based
    behaviour of the original script; file B duplicates are kept.
    """
    with open(file_a, 'r') as f_a:
        a_rows = f_a.readlines()
    with open(file_b, 'r') as f_b:
        b_rows = f_b.readlines()
    # Set membership is O(1); the original `in fileADict.keys()` built a
    # list per test under Python 2 (O(n) each time).
    a_set = set(a_rows)
    b_set = set(b_rows)
    # Rows of B never found in A are unique to B.
    unique_to_b = [row for row in b_rows if row not in a_set]
    # Rows of A never matched by any B row are unique to A; keep the
    # first-seen order and drop duplicates, like the original dict did.
    seen = set()
    unique_to_a = []
    for row in a_rows:
        if row not in b_set and row not in seen:
            seen.add(row)
            unique_to_a.append(row)
    return unique_to_a, unique_to_b

def write_rows(path, rows):
    """Write already newline-terminated rows to path (overwrites)."""
    with open(path, 'w') as out:
        for row in rows:
            out.write(row)

if __name__ == '__main__':
    # arcpy is only needed to fetch the tool parameters when this runs from
    # an ArcGIS Toolbox, so import it here rather than at module level.
    import arcpy
    fileA = arcpy.GetParameterAsText(0)
    fileB = arcpy.GetParameterAsText(1)
    outputDir = arcpy.GetParameterAsText(2)
    fileAUniqueList, fileBUniqueList = find_unique_rows(fileA, fileB)
    # Write files containing unique values
    write_rows(outputDir + os.sep + "uniquetoA.txt", fileAUniqueList)
    write_rows(outputDir + os.sep + "uniquetoB.txt", fileBUniqueList)
----------------------------------------------------------------------------------------------------------------------------------------------
#Note that in the above, arcpy isn't actually necessary for the core code. I use it here because I like the ArcGIS Toolbox method for obtaining initial parameters (and, in fact, the way the code is written it only runs properly through an ArcGIS Toolbox). However, if you prefer, you could always change it to use simple command line or even something like tkinter to get the input files and output location.
#Also note that when iterating through the dictionary items I used iteritems() because I'm using a 2.X version of python. If using python 3.X I believe you may need to change that to items().
# Find non-matching rows between two text files (hard-coded paths version).
# Rows unique to fileA are written to uniquetoA.txt and rows unique to
# fileB to uniquetoB.txt, both inside outputDir.
# NOTE(review): Python 2 code (iteritems()); `fileBRow in fileADict.keys()`
# builds a list per test in Python 2, so plain `fileBRow in fileADict`
# would be O(1).  arcpy is imported but never used in this variant, and
# the loop-body indentation was lost when this was pasted into the post.
import arcpy, os
fileA = r'T:\SanFrancisco\Legacy\Projects\CHSR\B-P\GIS\GDBs\GDB2csv_SFO2.txt'
fileB = r'I:\GDBs\GDB2csv_SFO2.txt'
outputDir = r'I:\GDBs'
# Maps each fileA row -> True once a matching fileB row has been seen.
fileADict = {}
fileAUniqueList = []
fileBUniqueList = []
# Loop through fileA rows, adding each to a dictionary
fileAObj = open(fileA, 'r')
for fileARow in fileAObj:
fileADict[fileARow] = False
fileAObj.close()
# Loop through fileB, checking against dictionary
fileBObj = open(fileB, 'r')
for fileBRow in fileBObj:
# If match in fileA dictionary, set dictionary match value to true
if fileBRow in fileADict.keys():
fileADict[fileBRow] = True
# If no match, then unique to B.
else:
fileBUniqueList.append(fileBRow)
fileBObj.close()
# Loop through fileA dictionary, checking match value. Unique to A if match is false
for key, match in fileADict.iteritems():
if not match:
fileAUniqueList.append(key)
# Write files containing unique values
fileAUnique = open(outputDir + os.sep + "uniquetoA.txt", "w")
for fileAItem in fileAUniqueList:
fileAUnique.write(fileAItem)
fileAUnique.close()
fileBUnique = open(outputDir + os.sep + "uniquetoB.txt", "w")
for fileBItem in fileBUniqueList:
fileBUnique.write(fileBItem)
fileBUnique.close()
R - Find Non-Matching rows in-between 2 text files
Let's say we have generated two csv files from different runs, as described at the link above.
A list of the feature datasets (FD) and feature classes (FC) in each GDB was saved to a csv file.
This will create an output file listing the rows (GDB, FD, FC) that are missing from one file when compared against the other, one comparison at a time.
Here is the R Code.....
# Report the rows present in GDB_output_More.txt but missing from
# GDB_output_Less.txt, comparing on the concatenation of the first three
# columns (GDB-FD-FC).
setwd("J:/Thumb drive/Technical/GIS Exercise/Chapters/Chapter 14 - R by example in Statistics & Geospatial Analysis/R/reneedyourhelpmann/Incomplete/Non Matching Rows/")
# Read data and concatenate fields
f1 <- read.csv("GDB_output_More.txt", header=TRUE, na.strings = " ")
f1.cat <- paste( f1[,1], f1[,2], f1[,3], sep="-" )
f2 <- read.csv("GDB_output_Less.txt", header=TRUE, na.strings = " ")
f2.cat <- paste( f2[,1], f2[,2], f2[,3], sep="-" )
# Create vector of "non-matching" values.  Fix: use logical negation rather
# than -which().  When nothing matches, which() is integer(0) and
# x[-integer(0)] silently returns an EMPTY vector instead of every row.
( x <- f1.cat[!(f1.cat %in% f2.cat)] )
# Create dataframe of results matching columns of source data.  strsplit()
# is vectorised, so the O(n^2) rbind-in-a-loop is unnecessary -- and this
# also works when x has zero or one element, where 2:length(x) broke.
missing <- as.data.frame(do.call(rbind, strsplit(x, "-")),
                         stringsAsFactors = FALSE)
names(missing) <- names(f1)
missing
# Write csv of results
write.csv(missing, "MissingData.csv", row.names=FALSE, quote=FALSE)
Saturday, March 28, 2015
R - Multiple subsetting a large xyz file with several polygons
Let's say you have a csv file larger than 4 GB and you want to extract the xyz points that fall within a certain boundary. Extracting xyz points for multiple boundaries would take several steps in ArcGIS, so here I demonstrate an R script that extracts the xyz points for multiple boundaries at once.
Here is an example,
I have combined 5 LiDAR files into 1 "input.csv" file and I would like to extract xyz points for 3 multiple boundaries for each of their individual output.csv file.
I have created a csv file having Xmin, Ymin, Xmax, Ymax for each of the boundary corner points.
# Subset a large xyz point file by multiple rectangular boundaries at once:
# build one bounding-box polygon per boundary, overlay the points, and keep
# only the points that fall inside some box.
rm(list=ls())
library(sp)
workDir <- "J:/Thumb drive/Technical/GIS Exercise/Chapters/Chapter 14 - R by example in Statistics & Geospatial Analysis/R/reneedyourhelpmann/Incomplete/Multiple Subsetting/"
setwd(workDir)
#read in points from csv
input <- read.csv("input.csv", header=F)
names(input) <- c("x","y","z")
# create spatial points object
pts <- SpatialPoints(input[,1:2])
# read in bounding box coords
boxes <- read.table("XY(min,max).txt", sep=",")
names(boxes) <- c("id", "xmin", "ymin","xmax", "ymax")
# expand to coordinates for bounding box rectangles
# (five x/y corner pairs per row; the first corner is repeated at the end
# to close the polygon ring)
poly.coords <- cbind(boxes$xmin, boxes$ymin,
boxes$xmin, boxes$ymax,
boxes$xmax, boxes$ymax,
boxes$xmax, boxes$ymin,
boxes$xmin, boxes$ymin)
ID <- boxes$id
# create polygons for bounding box rectangles
# split(poly.coords, row(poly.coords)) yields one 10-number vector per box,
# which is reshaped into a 5x2 coordinate matrix for Polygon().
polys <- SpatialPolygons(mapply(function(poly, id) {
xy <- matrix(poly, ncol=2, byrow=TRUE)
Polygons(list(Polygon(xy)), ID=id)
}, split(poly.coords, row(poly.coords)), ID))
polys.df <- SpatialPolygonsDataFrame(polys, data.frame(id=ID, row.names=ID))
# overlay points on polygons, get polygon ID and add to input csv
# over() returns NA for points that fall outside every box.
input <- cbind(input, over(pts, polys.df))
# write out csv only for points that fall within bounding box polygons
output <- input[!is.na(input$id),]
head(output)
write.csv(output, "output.csv", row.names=F)
> x y z id
> 102423 6521081 2056150 2544.94 B1
> 102424 6521054 2056184 2544.61 B1
> 102425 6521030 2056193 2544.75 B1
> 102427 6521073 2056161 2544.91 B1
> 102429 6521035 2056142 2545.27 B1
> 102432 6521046 2056171 2544.81 B1
# write out three separate files, no quotes around the ID column.
# Fix: the output file names now match the boundary ids -- the original
# wrote B1's points to "outputB4.csv", B2's to "outputB5.csv", and B3's to
# "outputB6.csv", contradicting the stated outputB1..B3 results.
write.csv(output[output$id == "B1",], "outputB1.csv", row.names=FALSE, quote=FALSE)
write.csv(output[output$id == "B2",], "outputB2.csv", row.names=FALSE, quote=FALSE)
write.csv(output[output$id == "B3",], "outputB3.csv", row.names=FALSE, quote=FALSE)
# write out one file per unique boundary id, no quotes around the ID column
for (i in unique(output$id)) {
  # Spell out FALSE: T and F are ordinary variables that can be reassigned.
  write.csv(output[output$id == i, ], paste0("output", i, ".csv"),
            row.names = FALSE, quote = FALSE)
}
This code will generate an outputB1.CSV, outputB2.CSV, outputB3.CSV having xyz file for each of the boundaries.
R -Get the First and Last elevation of each of the floodplain cross-sections
This is one of the output files from Flo-2D modeling. There are 1,500 floodplain cross-sections in the project area, and it is very difficult to get the first and last elevation of each of those cross-sections manually. The original file can be downloaded from the link below.
Sample File
First and Last elevation of each the floodplain cross-sections
Original File
First and Last elevation of each of the Floodplain cross-sections
The file looks like this one:
Here is the code in R:
# Extract the first and last elevation of every floodplain cross-section
# from a Flo-2D output file.
# (Dropped rm(list=ls()): wiping the caller's workspace is an anti-pattern.)
workDir <- "C:/Users/PKuranjekar/Desktop/Thumb drive/R/reneedyourhelpmann/"
setwd(workDir)
inData <- read.csv("First and Last of each Floodplain.txt", header=FALSE, sep=",", as.is=TRUE)
# A cross-section starts on a row whose 2nd/3rd columns are empty and whose
# 1st column holds its name; it ends on the matching "END" marker row.
begRows <- which(is.na(inData[,2]) & is.na(inData[,3]) & inData[,1]!="END")
endRows <- which(is.na(inData[,2]) & is.na(inData[,3]) & inData[,1]=="END")
# Every section header must have a matching END, or the pairing below lies.
stopifnot(length(begRows) == length(endRows))
outFile <- file("code1_output.txt","w")
# seq_along() is safe when no sections are found; 1:length() is not.
for (eachCS in seq_along(begRows)) {
  # Name, first elevation (row after the header), last elevation (row
  # before END).
  writeLines(paste(inData[begRows[eachCS],1],
                   inData[begRows[eachCS]+1,3],
                   inData[endRows[eachCS]-1,3]), outFile)
}
close(outFile)
clear
The output of this code will be a text file with:
and so on.....
R - Find and Change file names
Let's say we have a text file with the original file names in the 1st column and the new file names in the 3rd column. If a file in the folder matches a name in the first column, its name should be changed to the corresponding name in the 3rd column. The link below shows the format of the file.
Firm Panel File Names
The FIRM Panel file names
# Copy the FIRM panel rasters (.png plus world file .pgw) named in column 1
# of the csv into "Output folder/", then rename each to the name in column 3,
# writing the renamed files to "Output folder2/".
workDir <- "I:/J_Prashant's External HD/Thumb drive/Technical/GIS Toolbox/R/reneedyourhelpmann/Find and Change file Names/"
setwd(workDir)
inData <- read.csv("FIRM_Panel_Printed_Revised.csv", header=FALSE, sep=",", as.is=TRUE)
# Column 1: original base names; column 3: replacement base names.
FIRM_Panel_file_names_WE <- inData[,1]
FIRM_Panel_file_names_PNG <- paste0(FIRM_Panel_file_names_WE, ".png")
FIRM_Panel_file_names_PGW <- paste0(FIRM_Panel_file_names_WE, ".pgw")
FIRM_Panel_file_nos_Renames_WE <- inData[,3]
FIRM_Panel_file_Renames_PNG <- paste0(FIRM_Panel_file_nos_Renames_WE, ".png")
FIRM_Panel_file_Renames_PGW <- paste0(FIRM_Panel_file_nos_Renames_WE, ".pgw")
# Which of the expected files actually exist in the source folder?
FIRM_Panel_file_names <- list.files("FIRM Panels/")
FIRM_Panel_present_PNG <- FIRM_Panel_file_names_PNG %in% FIRM_Panel_file_names
FIRM_Panel_present_PGW <- FIRM_Panel_file_names_PGW %in% FIRM_Panel_file_names
# file.copy() is vectorised: logical subsetting replaces the two index
# loops over 1:length(), which misbehave on empty vectors (1:0 is c(1, 0)).
file.copy(paste0("FIRM Panels/",
                 FIRM_Panel_file_names_PNG[FIRM_Panel_present_PNG]),
          "Output folder/")
file.copy(paste0("FIRM Panels/",
                 FIRM_Panel_file_names_PGW[FIRM_Panel_present_PGW]),
          "Output folder/")
# Rename every copied file according to the lookup table.
select_files <- list.files("Output folder/")
for (eachFile in select_files) {
  # Split the file name once into base name and extension.
  parts <- unlist(strsplit(eachFile, "\\."))
  file_prefix <- parts[1]
  file_suffix <- parts[2]
  file_index <- which(file_prefix == inData[, 1])
  # Skip files with no (or an ambiguous) entry in the rename table instead
  # of calling file.rename() with a zero-length or multi-element new name.
  if (length(file_index) != 1) next
  outfile_name <- paste0(inData[file_index, 3], ".", file_suffix)
  file.rename(paste0("Output folder/", eachFile),
              paste0("Output folder2/", outfile_name))
}
The original file names are changed to the new file names in the output folder, as follows:
FIRM Panel file names
The FIRM panel file names
These are the raster files available in a original folder
# Copy the FIRM panel rasters (.png plus world file .pgw) named in column 1
# of the csv into "Output folder/", then rename each to the name in column 3,
# writing the renamed files to "Output folder2/".
# NOTE(review): duplicate listing of the script above.  The 1:length(x)
# loops misbehave when x is empty (1:0 yields c(1, 0)); seq_along() would
# be safer.  The two *_Renames_* vectors are computed but never used.
workDir <- "I:/J_Prashant's External HD/Thumb drive/Technical/GIS Toolbox/R/reneedyourhelpmann/Find and Change file Names/"
setwd(workDir)
inData <- read.csv("FIRM_Panel_Printed_Revised.csv", header=FALSE, sep=",", as.is=TRUE)
# Column 1: original base names; column 3: replacement base names.
FIRM_Panel_file_names_WE <- inData[,1]
FIRM_Panel_file_names_PNG <- paste0(FIRM_Panel_file_names_WE, ".png")
FIRM_Panel_file_names_PGW <- paste0(FIRM_Panel_file_names_WE, ".pgw")
FIRM_Panel_file_nos_Renames_WE <- inData[,3]
FIRM_Panel_file_Renames_PNG <- paste0(FIRM_Panel_file_nos_Renames_WE, ".png")
FIRM_Panel_file_Renames_PGW <- paste0(FIRM_Panel_file_nos_Renames_WE, ".pgw")
# Which of the expected files actually exist in the source folder?
FIRM_Panel_file_names <- list.files("FIRM Panels/")
FIRM_Panel_present_PNG <- FIRM_Panel_file_names_PNG %in% FIRM_Panel_file_names
FIRM_Panel_present_PGW <- FIRM_Panel_file_names_PGW %in% FIRM_Panel_file_names
for (eachFile in 1:length(FIRM_Panel_present_PNG)) {
if (FIRM_Panel_present_PNG[eachFile]) {
file.copy(paste0("FIRM Panels/", FIRM_Panel_file_names_PNG[eachFile]),
"Output folder/")
}
}
for (eachFile in 1:length(FIRM_Panel_present_PGW)) {
if (FIRM_Panel_present_PGW[eachFile]) {
file.copy(paste0("FIRM Panels/", FIRM_Panel_file_names_PGW[eachFile]),
"Output folder/")
}
}
# Rename every copied file according to the lookup table.
select_files <- list.files("Output folder/")
for (eachFile in select_files) {
file_prefix <- unlist(strsplit(eachFile, "\\."))[1]
file_suffix <- unlist(strsplit(eachFile, "\\."))[2]
file_index <- which(file_prefix == inData[, 1])
outfile_name <- paste0(inData[file_index, 3], ".", file_suffix)
file.rename(paste0("Output folder/", eachFile),
paste0("Output folder2/", outfile_name))
}
The original file name changes to new file name in the output folder which are as follows
Subscribe to:
Posts (Atom)