New file
| | |
| | | import matplotlib.pyplot as plt |
| | | from matplotlib import cm |
| | | import numpy as np |
| | | import math |
| | | from docxtpl import DocxTemplate |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | from matplotlib import cm, colors |
| | | import os |
| | | import time |
| | | import Base as base |
| | | import CalHead |
| | | import WaterXBL |
| | | |
| | | #get the Z (head) values for a given stress period and layer |
| | | def get_flow_Z(model_name,per,layer): |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | mlist = CalHead.muiltyModelList() |
| | | if model_name==base.not_allowed_model: |
| | | dir = base.baseModel2 + "\\modflow.head" |
| | | if model_name in mlist : |
| | | dir = base.muiltyModel + model_name + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | #take the last of the 3 timesteps stored per stress period |
| | | z = alldata[int(per+1)*3-1,int(layer),:,:] |
| | | z[(z<=0)] = 0 |
| | | return z |
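| | | |
| | | #Illustrative usage (the model name below is hypothetical): the head file |
| | | #keeps 3 timesteps per stress period, so index (per+1)*3-1 picks the last |
| | | #timestep of period `per`. |
| | | #   z = get_flow_Z("202301_202312", per=5, layer=0) |
| | | #   print(z.shape)   # (104, 114) grid; dry/negative cells clamped to 0 |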
| | | |
| | | #read the initial heads from the bas6 file |
| | | def get_init_flowZ(model_name,layer): |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | strs = ml.bas6.strt[layer] |
| | | z = strs.get_value() |
| | | z[(z<=0)] = 0 |
| | | |
| | | #mask the no-data boundary cells of the verification model |
| | | if model_name==base.not_allowed_model: |
| | | arr = np.loadtxt(base.areapath, dtype=int) |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | if arr[i][j] == -9999: |
| | | z[i][j]=0 |
| | | return z |
| | | |
| | | #build the contour-level array for the flow-field map |
| | | def get_flow_levels(data): |
| | | maxdata= np.max(data) |
| | | levels=[] |
| | | levels.append(0.1) |
| | | lines = 10 |
| | | while lines < maxdata: |
| | | levels.append(lines) |
| | | lines = lines + 10 |
| | | levels.append(math.ceil(maxdata)) |
| | | return levels |
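| | | |
| | | #Worked example (illustrative values): if np.max(data) == 34.7 the result |
| | | #is [0.1, 10, 20, 30, 35] -- a 0.1 floor, steps of 10, then the ceiling of |
| | | #the maximum as the top contour. |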
| | | |
| | | |
| | | #build contour levels for data whose value range is small |
| | | def get_flow_levels_small(vmin, vmax, line_count): |
| | | |
| | |     step = round((vmax - vmin)/line_count, 0) |
| | |     if step <= 0: |
| | |         step = 0.5 |
| | |     levels = [] |
| | |     levels.append(vmin) |
| | |     lines = vmin + step |
| | |     while lines < vmax: |
| | |         levels.append(lines) |
| | |         lines = lines + step |
| | |     levels.append(vmax) |
| | |     return levels |
| | | |
| | | #get the Z values of the head change between two stress periods |
| | | def get_bf_z(model_name,per1,per2): |
| | | |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | mlist = CalHead.muiltyModelList() |
| | | if model_name==base.not_allowed_model: |
| | | dir = base.baseModel2 + "\\modflow.head" |
| | | if model_name in mlist : |
| | | dir = base.muiltyModel + model_name + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | |
| | | #head change: last timestep of each period, layer 0 |
| | | z1= alldata[int(per1+1)*3-1,0,:,:] |
| | | z2 = alldata[int(per2+1)*3-1,0,:,:] |
| | | res = np.subtract(z2,z1) |
| | | return res |
| | | |
| | | |
| | | #flow-field map of the head change between two periods |
| | | def getFlowFieldBF(model_name,per1,per2,outpath): |
| | | res = get_bf_z(model_name,per1,per2) |
| | | vmax = np.max(res) |
| | | vmin = np.min(res) |
| | | # set the no-data cells to 9999 |
| | | arr = np.loadtxt(base.areapath, dtype=int) |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | if arr[i][j] == -9999: |
| | | res[i][j]= 9999.0 |
| | | levels= get_flow_levels_small(vmin, vmax, 10) |
| | | draw_flow(res,levels,outpath,cm.RdBu_r) |
| | | |
| | | str_levs=[] |
| | | for item in levels: |
| | | str_levs.append(str(round(item,2))) |
| | | |
| | | cols = get_gradient_color_list("RdBu_r",len(levels)) |
| | | dicts ={"levels":str_levs,"colors":cols} |
| | | xbl = WaterXBL.get_grd_storage(model_name,int(per1),int(per2)) |
| | | dicts["xbl"]=xbl |
| | | return dicts |
| | | |
| | | #flow-field map of groundwater depth (surface elevation minus head) |
| | | def getFlowFieldDepth(model_name,per,layer,title,flow_types,out_path): |
| | | Z= get_flow_Z(model_name,per,layer) |
| | | dis = np.loadtxt(base.dis_top_path, dtype=str) |
| | | areaMatrix = np.loadtxt(base.areapath, dtype=int) |
| | | for i in range(len(areaMatrix)): |
| | | for j in range(len(areaMatrix[i])): |
| | | if areaMatrix[i][j] == -9999: |
| | | Z[i][j] =0.0 |
| | | else : |
| | | Z[i][j] =float(dis[i][j])-Z[i][j] |
| | | |
| | | levels=[10,30,40,60,100,200,300,400,500,700,900] |
| | | draw_flow(Z,levels,out_path,cm.RdBu_r) |
| | | cols = get_gradient_color_list("RdBu_r",len(levels)) |
| | | dicts ={"levels":levels,"colors":cols} |
| | | return dicts |
| | | |
| | | |
| | | def getWaterResFiled(model_name,per): |
| | | pic = str(int(time.time())) +".png" |
| | | outpath = base.flow_file + pic |
| | | flow_field(model_name,per,0,"flow","online",outpath) |
| | | return pic |
| | | |
| | | #flow-field map |
| | | def flow_field(model_name,per,layer,title,flow_types,out_path): |
| | | Z= get_flow_Z(model_name,per,layer) |
| | | levels = get_flow_levels(Z) |
| | | draw_flow(Z,levels,out_path,cm.RdBu) |
| | | cols = get_gradient_color_list("RdBu",len(levels)) |
| | | dicts ={"levels":levels,"colors":cols} |
| | | return dicts |
| | | |
| | | |
| | | #initial flow-field map |
| | | def init_flow_field(model_name,layer,title,flow_types,out_path): |
| | | #for the verification model, read the initial head straight from the bas6 file |
| | | if model_name == base.not_allowed_model: |
| | | Z= get_init_flowZ(model_name,layer) |
| | | levels = get_flow_levels(Z) |
| | | draw_flow(Z,levels,out_path,cm.RdBu) |
| | | else: |
| | | pjson= CalHead.get_model_json(model_name) |
| | | if "initHeader" in pjson: |
| | | initHead = pjson["initHeader"] |
| | | print(initHead) |
| | | Z= get_flow_Z(initHead,0,0) |
| | | levels = get_flow_levels(Z) |
| | | draw_flow(Z,levels,out_path,cm.RdBu) |
| | | else: |
| | | return "#" |
| | | print(out_path) |
| | | return out_path |
| | | |
| | | |
| | | #draw the flow field |
| | | def draw_flow(Z,levels,out_path,colorbar): |
| | | |
| | | #start drawing the flow-field map |
| | | x = np.arange(0, 114, 1) |
| | | y = np.arange(0, 104, 1) |
| | | #grid |
| | | X, Y = np.meshgrid(x, y) |
| | | #flip the y-axis so the origin sits at the top-left corner |
| | | plt.gca().invert_yaxis() |
| | | #contour lines |
| | | C = plt.contour(X, Y, Z, levels=levels, linewidths=0.5, colors='white') |
| | | #contour label style |
| | | plt.clabel(C, inline=2,fmt='%.2f', fontsize=4,colors='black') |
| | | #fill between the contours, e.g. cmap="RdBu_r" / cm.RdBu_r |
| | | plt.contourf(X, Y, Z,levels= levels,alpha = 0.75,cmap=colorbar) |
| | | |
| | | plt.axis("off") |
| | | plt.colorbar().ax.set_visible(False) |
| | | plt.xticks([]) |
| | | plt.yticks([]) |
| | | |
| | | plt.savefig(out_path, dpi=300,transparent=True, bbox_inches='tight') |
| | | plt.close('all') |
| | | return out_path |
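| | | |
| | | #Usage sketch (hypothetical model name and output file name): |
| | | #   Z = get_flow_Z("202301_202312", per=0, layer=0) |
| | | #   draw_flow(Z, get_flow_levels(Z), "flow.png", cm.RdBu) |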
| | | |
| | | |
| | | def get_gradient_color_list(m_color_name, m_num): |
| | |     m_color_list = [] |
| | |     m_color_map = plt.get_cmap(m_color_name, m_num) |
| | |     for m_i in range(m_num): |
| | |         #drop the alpha channel and scale the RGB floats to 0-255 |
| | |         m_color = tuple([int(c * 255) for c in m_color_map(m_i)[:-1]]) |
| | |         m_color_list.append(m_color) |
| | |     return m_color_list |
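| | | |
| | | #For example, get_gradient_color_list("RdBu", 3) samples three (R, G, B) |
| | | #tuples evenly across the colormap (dark red, near-white, dark blue); these |
| | | #back the legend colors returned alongside the contour levels. |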
| | | |
| | | #export the report from the docx template |
| | | def exportReport(model_name,period): |
| | | tpl = DocxTemplate(base.prefix + '地下水分布信息模板.docx') |
| | | cont = archive_report_content(model_name,period) |
| | | tpl.render(cont) |
| | | save_path="" |
| | | tpl.save(save_path) |
| | | |
| | | |
| | | #assemble the report content |
| | | def archive_report_content(model_name,period): |
| | |     data1 = archive_grd_depth(model_name,period) |
| | |     data2 = archive_grd_res(model_name,period) |
| | | |
| | |     #initial flow field |
| | |     initpic = str(int(time.time())) +"1.png" |
| | |     outpath = base.flow_file + initpic |
| | |     flow_field(model_name,0,0,"初始流场信息","online",outpath) |
| | | |
| | |     currentpic = str(int(time.time())) +"2.png" |
| | |     outpath2 = base.flow_file + currentpic |
| | |     flow_field(model_name,int(period),0,"流场信息","online",outpath2) |
| | | |
| | |     content={ |
| | |         "py_avg_water":str(data1[0]), |
| | |         "py_m_water":str(data1[1]), |
| | |         "py_y_water":str(data1[2]), |
| | | |
| | |         "sq_avg_water":str(data1[3]), |
| | |         "sq_m_water":str(data1[4]), |
| | |         "sq_y_water":str(data1[5]), |
| | | |
| | |         "yq_avg_water":str(data1[6]), |
| | |         "yq_m_water":str(data1[7]), |
| | |         "yq_y_water":str(data1[8]), |
| | | |
| | |         "w_m_res":str(data2[0]), |
| | |         "w_y_res":str(data2[1]), |
| | |         "flow1":"/xishan/xinshanFlow/"+initpic, |
| | |         "flow2":"/xishan/xinshanFlow/"+currentpic |
| | |     } |
| | |     #prefix the level deltas with "回升" (rise) / "下降" (decline); the |
| | |     #resource delta uses "增加" (increase) / "减少" (decrease) |
| | |     if data1[1]>=0: |
| | |         content["py_m_water"]= "回升"+str(abs(data1[1])) |
| | |     else: |
| | |         content["py_m_water"]= "下降"+str(abs(data1[1])) |
| | | |
| | |     if data1[2]>=0: |
| | |         content["py_y_water"]= "回升"+str(abs(data1[2])) |
| | |     else: |
| | |         content["py_y_water"]= "下降"+str(abs(data1[2])) |
| | | |
| | |     if data1[4]>=0: |
| | |         content["sq_m_water"]= "回升"+str(abs(data1[4])) |
| | |     else: |
| | |         content["sq_m_water"]= "下降"+str(abs(data1[4])) |
| | | |
| | |     if data1[5]>=0: |
| | |         content["sq_y_water"]= "回升"+str(abs(data1[5])) |
| | |     else: |
| | |         content["sq_y_water"]= "下降"+str(abs(data1[5])) |
| | | |
| | |     if data1[7]>=0: |
| | |         content["yq_m_water"]= "回升"+str(abs(data1[7])) |
| | |     else: |
| | |         content["yq_m_water"]= "下降"+str(abs(data1[7])) |
| | | |
| | |     if data1[8]>=0: |
| | |         content["yq_y_water"]= "回升"+str(abs(data1[8])) |
| | |     else: |
| | |         content["yq_y_water"]= "下降"+str(abs(data1[8])) |
| | | |
| | |     if data2[1]>=0: |
| | |         content["w_y_res"]= "增加"+str(abs(data2[1])) |
| | |     else: |
| | |         content["w_y_res"]= "减少"+str(abs(data2[1])) |
| | |     return content |
| | | |
| | | #water resources volume |
| | | def archive_grd_res(model_name,period): |
| | | txt_path = base.model_dir + model_name + "\\water_res.txt" |
| | | if not os.path.exists(txt_path): |
| | | CalHead.run_zonebudget_res(model_name) |
| | | |
| | | monthdata = CalHead.water_res_month(model_name,txt_path,int(period)) |
| | | monthdata2=[] |
| | | if int(period) > 0: |
| | | monthdata2 = CalHead.water_res_month(model_name,txt_path,int(period-1)) |
| | | else: |
| | | monthdata2 = monthdata |
| | | |
| | | water1 = monthdata[0] + monthdata[1]+ monthdata[2]- monthdata[3] |
| | | water2 = monthdata2[0] + monthdata2[1] + monthdata2[2] - monthdata2[3] |
| | | water1 = round(water1 ,4) |
| | | wat= round(water1-water2 ,4) |
| | | |
| | | return [water1,wat] |
| | | |
| | | #monthly report content: groundwater depth |
| | | def archive_grd_depth(model_name,period): |
| | | model_dir = base.model_dir + model_name + "\\modflow.head" |
| | | head = bf.HeadFile(model_dir) |
| | | alldata = head.get_alldata() |
| | | |
| | | #get the monthly head slices |
| | | per = int(period) |
| | | current_month_data=[] |
| | | pre_month_data=[] |
| | | pre_year_data=[] |
| | | |
| | | if per > 0: |
| | | current_month_data = alldata[int(per+1)*3-1,0,:,:] |
| | | pre_month_data= alldata[int(per)*3-1,0,:,:] |
| | | else : |
| | | current_month_data = alldata[int(per+1)*3-1,0,:,:] |
| | | pre_month_data= current_month_data |
| | | |
| | | mpdict = getMPDict() |
| | | YQSDict = getYQSDict() |
| | | pingyuanArray = mpdict["1"] |
| | | shanquArray = mpdict["2"] |
| | | yqArray = YQSDict["1"] |
| | | |
| | | #elevation data (depth = surface elevation - head) |
| | | gc_array= np.array(getTopDis()) |
| | | depth_array = np.subtract(gc_array,current_month_data) |
| | | |
| | | py_data,py_data2 = 0,0 |
| | | sq_data, sq_data2 = 0,0 |
| | | yqs_data , yqs_data2 = 0,0 |
| | | # depth accumulators |
| | | py_depth,sq_depth,yqs_depth =0,0,0 |
| | | k,m,n =0,0,0 |
| | | for item in pingyuanArray: |
| | | i,j = item[0],item[1] |
| | | if current_month_data[i,j]>0: |
| | | py_depth += depth_array[i,j] |
| | | py_data += current_month_data[i,j] |
| | | k+=1 |
| | | if pre_month_data[i,j]>0: |
| | | py_data2 += pre_month_data[i,j] |
| | | |
| | | for item in shanquArray: |
| | | i,j = item[0],item[1] |
| | | if current_month_data[i,j]>0: |
| | | sq_depth += depth_array[i,j] |
| | | sq_data += current_month_data[i,j] |
| | | m+=1 |
| | | if pre_month_data[i,j]>0: |
| | | sq_data2 += pre_month_data[i,j] |
| | | |
| | | for item in yqArray: |
| | | i,j = item[0],item[1] |
| | | if current_month_data[i,j]>0: |
| | | yqs_depth += depth_array[i,j] |
| | | yqs_data += current_month_data[i,j] |
| | | n+=1 |
| | | if pre_month_data[i,j]>0: |
| | | yqs_data2 += pre_month_data[i,j] |
| | | |
| | | py_data = py_data/k |
| | | sq_data = sq_data/m |
| | | yqs_data= yqs_data/n |
| | | |
| | | py_data2 = py_data2/k |
| | | sq_data2 = sq_data2/m |
| | | yqs_data2= yqs_data2/n |
| | | |
| | | py_depth = py_depth/k |
| | | sq_depth = sq_depth/m |
| | | yqs_depth = yqs_depth/n |
| | | |
| | | |
| | | result=[py_depth,py_data-py_data2,0, |
| | | sq_depth,sq_data-sq_data2,0, |
| | | yqs_depth,yqs_data-yqs_data2,0] |
| | | |
| | | for i in range(len(result)): |
| | | result[i]= round(result[i],2) |
| | | |
| | | return result |
| | | |
| | | |
| | | |
| | | #line charts for the plain, mountain, and Yuquanshan zones of the study area |
| | | def getXs3LineChart(paths): |
| | | |
| | | head = bf.HeadFile(paths) |
| | | alldata = head.get_alldata() |
| | | months = int(len(alldata)/3) |
| | | |
| | | mpdict = getMPDict() |
| | | YQSDict = getYQSDict() |
| | | |
| | | pingyuanArray = mpdict["1"] |
| | | shanquArray = mpdict["2"] |
| | | yqArray = YQSDict["1"] |
| | | |
| | | result1=[] |
| | | result2=[] |
| | | result3=[] |
| | | for per in range(months): |
| | | current_month = alldata[int(per+1)*3-1,0,:,:] |
| | | |
| | | yqs_data, py_data, sq_data = 0.0, 0.0, 0.0 |
| | | M ,N,k = 0,0,0 |
| | | |
| | | for item in pingyuanArray: |
| | | i,j = item[0],item[1] |
| | | if current_month[i,j]>0: |
| | | py_data += current_month[i,j] |
| | | M+=1 |
| | | py_data= round(py_data/M,2) |
| | | result1.append(py_data) |
| | | |
| | | for item in shanquArray: |
| | | i,j = item[0],item[1] |
| | | if current_month[i,j]>0: |
| | | sq_data += current_month[i,j] |
| | | N+=1 |
| | | sq_data= round(sq_data/N,2 ) |
| | | result2.append(sq_data) |
| | | |
| | | for item in yqArray: |
| | | i,j = item[0],item[1] |
| | | if current_month[i,j]>0: |
| | | yqs_data += current_month[i,j] |
| | | k+=1 |
| | | yqs_data= round(yqs_data/k,2 ) |
| | | result3.append(yqs_data) |
| | | |
| | | |
| | | dicts={"pyq":result1,"sq":result2,"yqs":result3} |
| | | return dicts |
| | | |
| | | |
| | | |
| | | #dictionary of mountain/plain zones |
| | | #plain = 1, mountain = 2 |
| | | def getMPDict(): |
| | | arr = np.loadtxt(base.xs_mp_path, dtype=int) |
| | | dict ={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | zb = str(arr[i][j]) |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if zb not in dict: |
| | | dict[zb] = [(i,j)] |
| | | else: |
| | | dict[zb].append((i,j)) |
| | | return dict |
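| | | |
| | | #Shape sketch (coordinates are illustrative): zone code -> list of (row, col) |
| | | #cells, e.g. {"1": [(0, 5), (0, 6), ...], "2": [(40, 2), ...]}; code "1" is |
| | | #the plain, "2" the mountain zone. |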
| | | |
| | | #dictionary of Yuquanshan cells |
| | | #Yuquanshan = 1 |
| | | def getYQSDict(): |
| | | arr = np.loadtxt(base.yqs_path, dtype=int) |
| | | dict ={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | zb = str(arr[i][j]) |
| | | if arr[i][j] != 1: |
| | | continue |
| | | if zb not in dict: |
| | | dict[zb] = [(i,j)] |
| | | else: |
| | | dict[zb].append((i,j)) |
| | | return dict |
| | | |
| | | #get the surface elevation matrix |
| | | def getTopDis(): |
| | | arr = np.loadtxt(base.dis_top_path, dtype=str) |
| | | float_array = np.array(arr).astype("float") |
| | | return float_array |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Fri Oct 20 16:15:23 2023 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | |
| | | import numpy as np |
| | | import shutil |
| | | import os |
| | | |
| | | |
| | | # folder for the generated flow-field images |
| | | flow_file ="D:\\javaCode\\xishan\\xishan\\xishan\\xinshanFlow\\" |
| | | |
| | | prefix ='C:\\Users\\86134\\Desktop\\xsModel2\\' |
| | | |
| | | ZoneBudget64Exe= prefix + "zonebuget\\ZoneBudget64.exe" |
| | | |
| | | water_bal_zones = prefix +"zonebuget\\water_bal.zones\n" |
| | | water_res_zones = prefix +"zonebuget\\water_res.zones\n" |
| | | |
| | | water_lsh_path = prefix + "water_lsh.ini" |
| | | water_yhy_path = prefix + "water_yhy.ini" |
| | | water_dbw_path = prefix + "water_dbw.ini" |
| | | |
| | | |
| | | baseModel = prefix + 'verifyModel\\' |
| | | baseModel2 = prefix + 'verifyModel2\\' |
| | | |
| | | predictModel= prefix + 'predictModel\\' |
| | | |
| | | predictParamModel= prefix + 'predictParamModel\\' |
| | | |
| | | muiltyModel = prefix + 'muiltyModel\\' |
| | | |
| | | model_dir = prefix + '0612Model\\' |
| | | |
| | | obswellpath = prefix + '监测井.ini' |
| | | obswell_data_path= prefix + 'water_obs_data.ini' |
| | | |
| | | obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8') |
| | | |
| | | district_path = prefix +"区县.ini" |
| | | |
| | | district= np.loadtxt(district_path, dtype=str,encoding='utf-8') |
| | | |
| | | pumpwellpath = prefix +'抽水井.ini' |
| | | |
| | | pump_well = np.loadtxt(pumpwellpath, dtype=str,encoding='utf-8') |
| | | |
| | | period_path = prefix +"period.json" |
| | | |
| | | areapath = prefix + '分区.ini' |
| | | area_array = np.loadtxt(areapath, dtype=str,encoding='utf-8') |
| | | |
| | | #water-balance data path |
| | | water_equ_path = prefix + 'water_equ.ini' |
| | | water_equ = np.loadtxt(water_equ_path, dtype=str,encoding='utf-8') |
| | | |
| | | water_equ_path2022 = prefix + 'water_equ2022.ini' |
| | | water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8') |
| | | |
| | | #surface elevation data |
| | | dis_top_path = prefix + 'md_dis_top.ini' |
| | | |
| | | #zoned storage coefficients |
| | | lpf_path = prefix + 'md_lpf.ini' |
| | | md_lpf = np.loadtxt(lpf_path, dtype=str,encoding='utf-8') |
| | | |
| | | # Yuquanshan matrix data |
| | | yqs_path= prefix + '玉泉山泉分区.ini' |
| | | xs_yqs_matrix = np.loadtxt(yqs_path, dtype=str,encoding='utf-8') |
| | | |
| | | # mountain/plain zone matrix |
| | | xs_mp_path = prefix + '山区平原区分区.ini' |
| | | xs_mp_matrix = np.loadtxt(xs_mp_path, dtype=str,encoding='utf-8') |
| | | |
| | | |
| | | model_config ='D:\\javaCode\\xishan\\objclipdig\\ModelFlow_xishan\\config.ini' |
| | | |
| | | model3d_path='D:/javaCode/xishan/xishan/xishan/output2/' |
| | | |
| | | modeldata_csv_path ="C:/Users/86134/Desktop/xsModel2/0612Model/" |
| | | |
| | | exe_path = 'D:/javaCode/xishan/objclipdig/ModelFlow_xishan/ModelFlow_xishan.exe' |
| | | |
| | | |
| | | #launch the exe program |
| | | def callModelexe(): |
| | | os.system(exe_path) |
| | | |
| | | |
| | | #update the model's exe configuration |
| | | def updateModelConfig(model_name): |
| | | conf = np.loadtxt(model_config, dtype=str,encoding='utf-8') |
| | | outpath = "outpath=" + model3d_path + model_name |
| | | csvpath = "csvpath=" + modeldata_csv_path + model_name +"/output" |
| | | conf[1]=outpath |
| | | conf[2]=csvpath |
| | | np.savetxt(model_config,conf, newline='\n', fmt='%s' , encoding='utf-8') |
| | | |
| | | |
| | | |
| | | def getPumpWellName(row,column): |
| | | |
| | | for index, r, c,ids, qu ,name in pump_well: |
| | | if r==row and c == column: |
| | | return name |
| | | |
| | | return "NONE" |
| | | |
| | | |
| | | #get the dictionary of matrix groups (zone code -> list of cells) |
| | | def getAreas(): |
| | | arr = np.loadtxt(areapath, dtype=int) |
| | | dict ={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | zb = str(arr[i][j]) |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if zb not in dict: |
| | | dict[zb] = [(i,j)] |
| | | else: |
| | | dict[zb].append((i,j)) |
| | | return dict |
| | | |
| | | |
| | | def getAreaDictFirstIndex(): |
| | | arr = np.loadtxt(areapath, dtype=int) |
| | | dict ={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if arr[i][j] not in dict: |
| | | dict[arr[i][j]] = [(i,j)] |
| | | |
| | | return dict |
| | | |
| | | |
| | | #get the dictionary of flat cell ids per zone |
| | | def getAreaDictIndexArray(): |
| | | arr = np.loadtxt(areapath, dtype=int) |
| | | dict_array={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | zb= str(arr[i][j]) |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if zb not in dict_array: |
| | | array= [] |
| | | index = getCellIdByRC(i+1,j+1) |
| | | array.append(index) |
| | | dict_array[zb] = array |
| | | else: |
| | | index = getCellIdByRC(i+1,j+1) |
| | | dict_array[zb].append(index) |
| | | |
| | | return dict_array |
| | | |
| | | |
| | | def getCellIdByRC(rowVal, columnVal): |
| | | return (rowVal - 1) * 114 + columnVal - 1 |
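| | | |
| | | #The grid is 104 rows x 114 columns, so the cell id is a row-major index |
| | | #over 1-based inputs: getCellIdByRC(2, 5) == (2-1)*114 + 5 - 1 == 118. |
| | | #getRowCloumnById in CalHead walks the same order and recovers the 0-based |
| | | #(row, column) = (1, 4) from id 118. |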
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | |
| | | """ |
| | | |
| | | import numpy as np |
| | | import shutil |
| | | import os |
| | | |
| | | |
| | | |
| | | #default name of the base model (the one not allowed to be modified) |
| | | not_allowed_model="202001_202212" |
| | | |
| | | archive_models=["SP0-0","SP1-1","SP1-2","SP1-3","SP2-1","SP2-2","SP2-3","SP3-1", |
| | | "SP3-2","SP3-4","SP3-5","SP3-6","SP3-7","SP4-1","SP4-7"] |
| | | |
| | | # folder for the generated flow-field images |
| | | flow_file ="D:\\javaCode\\xishan\\xishan\\xishan\\xinshanFlow\\" |
| | | |
| | | prefix ='C:\\Users\\ZMK\\Desktop\\xsModel2\\' |
| | | |
| | | ZoneBudget64Exe= prefix + "zonebuget\\ZoneBudget64.exe" |
| | | |
| | | water_bal_zones = prefix +"zonebuget\\water_bal.zones\n" |
| | | water_res_zones = prefix +"zonebuget\\water_res.zones\n" |
| | | |
| | | water_lsh_path = prefix + "water_lsh.ini" |
| | | water_yhy_path = prefix + "water_yhy.ini" |
| | | water_dbw_path = prefix + "water_dbw.ini" |
| | | |
| | | |
| | | baseModel = prefix + 'verifyModel\\' |
| | | baseModel2 = prefix + 'verifyModel2\\' |
| | | |
| | | predictModel= prefix + 'predictModel\\' |
| | | predictModel60 = prefix + 'predictModel60\\' |
| | | |
| | | predictParamModel= prefix + 'predictParamModel\\' |
| | | |
| | | muiltyModel = prefix + 'muiltyModel\\' |
| | | |
| | | model_dir = prefix + '0612Model\\' |
| | | |
| | | obswellpath = prefix + '监测井.ini' |
| | | obswell_data_path= prefix + 'water_obs_data.ini' |
| | | |
| | | well_scale_path = prefix + 'well_scale.ini' |
| | | |
| | | obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8') |
| | | |
| | |
| | | water_equ_path2022 = prefix + 'water_equ2022.ini' |
| | | water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8') |
| | | |
| | | |
| | | #surface elevation data |
| | | dis_top_path = prefix + 'md_dis_top.ini' |
| | | |
| | | #zoned storage coefficients |
| | | lpf_path = prefix + 'md_lpf.ini' |
| | | md_lpf = np.loadtxt(lpf_path, dtype=str,encoding='utf-8') |
| | | |
| | | # Yuquanshan matrix data |
| | | yqs_path= prefix + '玉泉山泉分区.ini' |
| | | xs_yqs_matrix = np.loadtxt(yqs_path, dtype=str,encoding='utf-8') |
| | | |
| | | # mountain/plain zone matrix |
| | | xs_mp_path = prefix + '山区平原区分区.ini' |
| | | xs_mp_matrix = np.loadtxt(xs_mp_path, dtype=str,encoding='utf-8') |
| | | |
| | | |
| | | model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini' |
| | | |
| | | model3d_path='D:/javaCode/xishan/xishan/xishan/output2/' |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Tue Oct 31 16:12:55 2023 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | |
| | | |
| | | def mergeWaterData(balArray,resArray): |
| | |     dicts = {} |
| | | |
| | |     inarray=[] |
| | |     inarray.append({"name":"降水入渗量","value":balArray[0]}) |
| | |     inarray.append({"name":"河流入渗量","value":balArray[1]}) |
| | |     inarray.append({"name":"L1侧向补给量","value":balArray[2]}) |
| | |     inarray.append({"name":"L3侧向补给量","value":balArray[3]}) |
| | |     outarray=[] |
| | |     outarray.append({"name":"人工开采量","value":balArray[4]}) |
| | |     outarray.append({"name":"L1侧向流出量","value":balArray[5]}) |
| | |     outarray.append({"name":"L3侧向流出量","value":balArray[6]}) |
| | | |
| | |     inarray2=[] |
| | |     inarray2.append({"name":"大气降水","value":resArray[0]}) |
| | |     inarray2.append({"name":"永定河渗漏","value":resArray[1]}) |
| | |     inarray2.append({"name":"侧向流入","value":resArray[2]}) |
| | |     outarray2=[] |
| | |     outarray2.append({"name":"侧向流出","value":resArray[3]}) |
| | | |
| | |     dicts["pie1"]=inarray |
| | |     dicts["pie2"]=outarray |
| | |     dicts["pie3"]=inarray2 |
| | |     dicts["pie4"]=outarray2 |
| | | |
| | |     return dicts |
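| | | |
| | | #Usage sketch (numbers are illustrative): balArray carries the seven |
| | | #water-balance terms, resArray the four water-resource terms; the result |
| | | #feeds the four pie charts. |
| | | #   charts = mergeWaterData([1.2,0.3,0.1,0.05,0.9,0.2,0.1], [1.0,0.4,0.2,0.3]) |
| | | #   charts["pie1"][0]   # {"name": "降水入渗量", "value": 1.2} |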
| | | |
| | | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | @author: ZMK |
| | | """ |
| | | |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import Base as base |
| | | import os |
| | | import json |
| | | import subprocess |
| | | import re |
| | | import ModelPeriod |
| | | import numpy as np |
| | | import AchiveReport |
| | | |
| | | |
| | | #add a model entry |
| | | def addModelJson(model_name,start_time,end_time,remark): |
| | | |
| | | context="" |
| | | prediction_path = base.prefix + "\\model_list.json" |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | context = json.load(f) |
| | | array=[] |
| | | for item in context: |
| | | array.append(item) |
| | | |
| | | dicts={"model_name":model_name, |
| | | "start_time":start_time,"end_time":end_time,"remark":remark} |
| | | array.append(dicts) |
| | | |
| | | with open(prediction_path, "w",encoding='utf-8') as outfile: |
| | | json.dump(array, outfile,ensure_ascii=False) |
| | | return "ä¿å宿¯ï¼"; |
| | | |
| | | #remove a model entry |
| | | def removeModelJson(model_name): |
| | | context="" |
| | | prediction_path = base.prefix + "\\model_list.json" |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | context = json.load(f) |
| | | array=[] |
| | | for item in context: |
| | | if item["model_name"] != model_name: |
| | | array.append(item) |
| | | |
| | | with open(prediction_path, "w",encoding='utf-8') as outfile: |
| | | json.dump(array, outfile,ensure_ascii=False) |
| | | |
| | | return "å 餿¨¡å宿¯ï¼"; |
| | | |
| | | |
| | | def get_model_json(model_name): |
| | |     prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | |     with open(prediction_path,encoding='utf-8') as f: |
| | |         period_json = json.load(f) |
| | |     return period_json |
| | | |
| | | def get_model_period(model_name): |
| | |     period_json = get_model_json(model_name) |
| | |     start_time = period_json["start_time"] |
| | |     end_time = period_json["end_time"] |
| | |     months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | |     return months |
| | | |
| | | def is_cloned_model(model_name): |
| | | paths = base.model_dir + model_name + "\\water_bal.txt" |
| | | if os.path.exists(paths): |
| | | return False |
| | | return True |
| | | |
| | | |
| | | #observation-well chart |
| | | def obsChartdata(wellId, model_name, row, column): |
| | | |
| | |     row = int(row)-1 |
| | |     column = int(column)-1 |
| | | |
| | |     period_json = get_model_json(model_name) |
| | |     start_time = period_json["start_time"] |
| | |     end_time = period_json["end_time"] |
| | |     xmonths = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | |
| | |     if model_name == base.not_allowed_model: |
| | |         dir = base.baseModel2 + "\\modflow.head" |
| | |     else: |
| | |         dir = base.model_dir + model_name + "\\modflow.head" |
| | |     cloned = is_cloned_model(model_name) |
| | |     if cloned == True: |
| | |         return {"y_data": [], "y_data2": [], "x_data": xmonths} |
| | | |
| | |     head = bf.HeadFile(dir) |
| | |     alldata = head.get_alldata() |
| | | |
| | |     #number of stress periods (3 timesteps are stored per period) |
| | |     months = int(len(alldata)/3) |
| | |     ydata = [] |
| | |     for month in range(months): |
| | |         z1 = alldata[int(month+1)*3-1,0,:,:] |
| | |         cell_data = float(z1[row][column]) |
| | |         ydata.append(round(cell_data,2)) |
| | | |
| | |     y_data2 = [] |
| | |     if model_name == base.not_allowed_model: |
| | |         array_data = np.loadtxt(base.obswell_data_path, dtype=str,encoding='utf-8') |
| | |         y_data2 = getObsData(wellId,array_data) |
| | | |
| | |     result = {"y_data": ydata, "y_data2": y_data2, "x_data": xmonths} |
| | |     return result |
| | | |
| | | #average each run of 3 timestep observations into one monthly value |
| | | def getObsData(wellId,array_data): |
| | |     result = [] |
| | |     new_list = [] |
| | |     for item in array_data: |
| | |         if item[0]==wellId: |
| | |             result.append(item[3]) |
| | |     for i in range(0,len(result),3): |
| | |         data = (float(result[i]) + float(result[i+1]) + float(result[i+2]))/3 |
| | |         data = round(data,2) |
| | |         new_list.append(data) |
| | | |
| | |     return new_list |
| | | |
| | | |
| | | #map a flat cell id back to (row, column) on the 104 x 114 grid |
| | | def getRowCloumnById(index_id): |
| | |     row,column,count = 104,114,0 |
| | |     for i in range(row): |
| | |         for j in range(column): |
| | |             if index_id == count: |
| | |                 return (i, j) |
| | |             count += 1 |
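| | | |
| | | #Sketch (hypothetical well id): observation rows come in 10-day steps, so |
| | | #getObsData averages each run of 3 readings into one monthly value, e.g. |
| | | #readings [10.0, 10.3, 10.6] -> [10.3]; getRowCloumnById(118) returns (1, 4). |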
| | |
| | | #groundwater level chart for one cell |
| | | def earthWaterChart(model_name, index_id): |
| | | |
| | |     row_column = getRowCloumnById(index_id) |
| | |     row = row_column[0] |
| | |     column = row_column[1] |
| | | |
| | |     period_json = get_model_json(model_name) |
| | |     start_time = period_json["start_time"] |
| | |     end_time = period_json["end_time"] |
| | |     xmonths = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | |
| | |     if model_name == base.not_allowed_model: |
| | |         dir = base.baseModel2 + "\\modflow.head" |
| | |     else: |
| | |         dir = base.model_dir + model_name + "\\modflow.head" |
| | |     cloned = is_cloned_model(model_name) |
| | |     if cloned == True: |
| | |         return {"y_data": [], "x_data": xmonths} |
| | | |
| | |     head = bf.HeadFile(dir) |
| | |     alldata = head.get_alldata() |
| | | |
| | |     #number of stress periods (3 timesteps are stored per period) |
| | |     months = int(len(alldata)/3) |
| | |     ydata = [] |
| | |     for month in range(months): |
| | |         z1 = alldata[int(month+1)*3-1,0,:,:] |
| | |         cell_data = float(z1[row][column]) |
| | |         ydata.append(round(cell_data,2)) |
| | | |
| | |     result = {"y_data": ydata, "x_data": xmonths} |
| | |     return result |
| | | |
| | | def heatmapdata(model_name,period): |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | |
| | | alldata = head.get_alldata() |
| | | |
| | | |
| | | index = int(period)*3 |
| | | return alldata[index][0] |
| | | |
| | | |
| | | #water balance calculation (this earlier definition is overridden by the later waterEqu below) |
| | | def waterEqu(model_name): |
| | | if model_name == '202001_202212': |
| | | water_equ_path = base.prefix + "\\water_equ.json" |
| | | with open(water_equ_path,encoding='utf-8') as f: |
| | | data = json.load(f) |
| | | return data |
| | | else: |
| | | year = model_name[0:4] |
| | | title =[year] |
| | | dict ={"title":title} |
| | | |
| | | celldata = np.array(base.water_equ2022).tolist() |
| | | |
| | | predict_json= get_model_json(model_name) |
| | | |
| | | a1=float(celldata[0]) |
| | | a2=float(celldata[1]) |
| | | a3=float(celldata[2]) |
| | | a4=float(celldata[3]) |
| | | |
| | | b1=float(celldata[4]) |
| | | b2=float(celldata[5]) |
| | | b3=float(celldata[6]) |
| | | |
| | | if predict_json["rain"]: |
| | | a1= float(predict_json["rain"]["ratio"]) * float(celldata[0]) |
| | | a3= float(predict_json["rain"]["ratio"]) * float(celldata[2]) |
| | | a4= float(predict_json["rain"]["ratio"]) * float(celldata[3]) |
| | | b2= float(predict_json["rain"]["ratio"]) * float(celldata[5]) |
| | | b3= float(predict_json["rain"]["ratio"]) * float(celldata[6]) |
| | | if predict_json["river"]: |
| | | a2= float(predict_json["river"]["ratio"]) * float(celldata[1]) |
| | | |
| | | if predict_json["mine"]: |
| | | b1= b1 |
| | | |
| | | in_data= a1+a2+a3+a4 |
| | | out_data= b1 +b2 + b3 |
| | | float_data=[a1,a2,a3,a4,in_data,b1,b2,b3,out_data,in_data-out_data] |
| | | |
| | | inarray=[] |
| | | inarray.append({"name":"降水入渗量","value":a1}) |
| | | inarray.append({"name":"河流入渗量","value":a2}) |
| | | inarray.append({"name":"L1侧向补给量","value":a3}) |
| | | inarray.append({"name":"L3侧向补给量","value":a4}) |
| | | outarray=[] |
| | | outarray.append({"name":"人工开采量","value":b1}) |
| | | outarray.append({"name":"L1侧向流出量","value":b2}) |
| | | outarray.append({"name":"L3侧向流出量","value":b3}) |
| | | pie1={str(year):inarray} |
| | | pie2={str(year):outarray} |
| | | |
| | | dict["pie1"]=pie1 |
| | | dict["pie2"]=pie2 |
| | | |
| | | array2d=[] |
| | | array2d.append([str(year)]) |
| | | for i in range(len(float_data)): |
| | | tmp=[] |
| | | tmp.append(str(float_data[i])) |
| | | array2d.append(tmp) |
| | | dict["data"]=array2d |
| | | return dict |
| | | |
| | | |
| | | #导åºcsvæä»¶ |
| | | def exportCsV(model_name): |
| | | |
| | |
| | |     #assumed paths (hypothetical), following the conventions used elsewhere in this module |
| | |     dir = base.model_dir + model_name + "\\modflow.head" |
| | |     out_path = base.model_dir + model_name + "\\output\\" |
| | |     if not os.path.exists(out_path): |
| | |         os.mkdir(out_path) |
| | | |
| | |     head = bf.HeadFile(dir) |
| | |     alldata = head.get_alldata() |
| | | |
| | |     months = int(len(alldata)/3) |
| | |     layers = 3 |
| | |     #e.g. months 0-36 |
| | |     for month in range(months): |
| | |         for layer in range(layers): |
| | |             #last timestep of each stress period |
| | |             z_last = alldata[(month+1)*3-1,layer,:,:] |
| | | |
| | |             filename = out_path + str(month+1) + '-' + str(layer+1) + '.csv' |
| | |             f = open(filename, 'w', newline='') |
| | |             writer = csv.writer(f) |
| | |             for p in z_last: |
| | |                 writer.writerow(p) |
| | |             f.close() |
| | | |
| | |     return out_path |
| | | |
| | | |
| | | #water balance calculation |
| | | def waterEqu(model_name): |
| | | if model_name == base.not_allowed_model: |
| | | water_equ_path = base.prefix + "\\water_equ.json" |
| | | with open(water_equ_path,encoding='utf-8') as f: |
| | | data = json.load(f) |
| | | return data |
| | | else: |
| | | year = model_name |
| | | title =[year] |
| | | dict ={"title":title} |
| | | |
| | | jx = get_model_json(model_name) |
| | | dict["start_time"]=jx["start_time"] |
| | | dict["end_time"]=jx["end_time"] |
| | | |
| | | paths=base.model_dir + model_name +"\\water_bal.txt" |
| | | wat = water_balance(model_name, paths) |
| | | |
| | | in_data= round(wat[0]+ wat[1]+ wat[2]+ wat[3] , 4) |
| | | out_data= round(wat[4] + wat[5] + wat[6], 4) |
| | | inout = round(in_data-out_data, 4) |
| | | float_data=[wat[0],wat[1],wat[2],wat[3],in_data, |
| | | wat[4],wat[5], wat[6],out_data,inout] |
| | | |
| | | inarray=[] |
| | | inarray.append({"name":"降水入渗量","value":wat[0]}) |
| | | inarray.append({"name":"河流入渗量","value":wat[1]}) |
| | | inarray.append({"name":"上层侧向补给量","value":wat[2]}) |
| | | inarray.append({"name":"下层侧向补给量","value":wat[3]}) |
| | | outarray=[] |
| | | outarray.append({"name":"人工开采量","value":wat[4]}) |
| | | outarray.append({"name":"上层侧向流出量","value":wat[5]}) |
| | | outarray.append({"name":"下层侧向流出量","value":wat[6]}) |
| | | pie1={str(year):inarray} |
| | | pie2={str(year):outarray} |
| | | |
| | | dict["pie1"]=pie1 |
| | | dict["pie2"]=pie2 |
| | | |
| | | array2d=[] |
| | | array2d.append(["数据（亿立方米）"]) |
| | | for i in range(len(float_data)): |
| | | tmp=[] |
| | | tmp.append(str(float_data[i])) |
| | | array2d.append(tmp) |
| | | dict["data"]=array2d |
| | | return dict |
| | | |
| | | |
| | | |
| | | def run_zonebudget_bal(model_name): |
| | | # define the exe path and its stdin inputs |
| | | exe_path = base.ZoneBudget64Exe |
| | | txt_path = base.model_dir + model_name + "\\water_bal.txt\n" |
| | | cell_path = base.model_dir + model_name + "\\modflow.flow\n" |
| | | process = subprocess.Popen([exe_path], stdin=subprocess.PIPE,shell = True) |
| | | process.stdin.write(txt_path.encode()) # input parameter 1 |
| | | process.stdin.write(cell_path.encode()) |
| | | process.stdin.write(b"title\n") |
| | | process.stdin.write(base.water_bal_zones.encode()) |
| | | process.stdin.write(b"A\n") |
| | | output, _ = process.communicate() |
| | | print(output) |
| | | |
| | | |
| | | def run_zonebudget_res(model_name): |
| | | # define the exe path and its stdin inputs |
| | | exe_path = base.ZoneBudget64Exe |
| | | txt_path = base.model_dir + model_name + "\\water_res.txt\n" |
| | | cell_path = base.model_dir + model_name + "\\modflow.flow\n" |
| | | process = subprocess.Popen([exe_path], stdin=subprocess.PIPE,shell = True) |
| | | process.stdin.write(txt_path.encode()) # input parameter 1 |
| | | process.stdin.write(cell_path.encode()) |
| | | process.stdin.write(b"title\n") |
| | | process.stdin.write(base.water_res_zones.encode()) |
| | | process.stdin.write(b"A\n") |
| | | output, _ = process.communicate() |
| | | print(output) |
| | | |
| | | def reg_find_int(text): |
| | | numbers = re.findall(r'\d+', text) |
| | | return numbers |
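| | | |
| | | #Example: ZoneBudget section headers read like "Flow Budget for Zone 3 at |
| | | #Time Step 2 of Stress Period 5", so reg_find_int returns ['3', '2', '5'] |
| | | #and the parsers below join them into the dict key "3,2,5". |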
| | | |
| | | |
| | | def read_txt(path): |
| | | data =[] |
| | | with open(path, 'r') as file: |
| | | lines = file.readlines() |
| | | for line in lines: |
| | | data.append(line) |
| | | return data |
| | | |
| | | #parse the water-balance output |
| | | def water_balance(model_name,paths): |
| | | data= read_txt(paths) |
| | | lens = len(data) |
| | | index = 0 |
| | | segment=[] |
| | | dicts={} |
| | | flag = 0 |
| | | title="" |
| | | while index < lens: |
| | | strs = data[index].strip() |
| | | if strs.startswith("Flow Budget for Zone"): |
| | | segment=[] |
| | | flag = 1 |
| | | title=strs |
| | | if strs.startswith("Percent Discrepancy"): |
| | | segment.append(strs) |
| | | numbers = reg_find_int(title) |
| | | key = ','.join(numbers) |
| | | dicts[key]=segment |
| | | flag = 0 |
| | | if flag ==1 : |
| | | segment.append(strs) |
| | | index=index+1 |
| | | |
| | | recharge = 0 |
| | | for key in dicts: |
| | | array = dicts[key] |
| | | temp=[] |
| | | for item in array: |
| | | if item.startswith("RECHARGE") : |
| | | strs = item.replace(" ", "").replace("RECHARGE=", "") |
| | | temp.append(float(strs)) |
| | | |
| | | recharge += (temp[0]-temp[1]) |
| | | |
| | | #river leakage (positive): sum the WELLS items over all periods for Zone 3 |
| | | zone3 = 0 |
| | | for key in dicts: |
| | | if key.startswith("3,"): |
| | | array = dicts[key] |
| | | temp=[] |
| | | for item in array: |
| | | if item.startswith("WELLS") : |
| | | strs = item.replace(" ", "").replace("WELLS=", "") |
| | | temp.append(float(strs)) |
| | | |
| | | zone3 += (temp[0]-temp[1]) |
| | | |
| | | #L1 lateral recharge (positive): sum the IN-WELLS items over all periods for Zone 4 |
| | | Zone4_in_well=0 |
| | | for key in dicts: |
| | | if key.startswith("4,"): |
| | | array = dicts[key] |
| | | for item in array: |
| | | if item.startswith("WELLS") : |
| | | strs = item.replace(" ", "").replace("WELLS=", "") |
| | | data = float(strs) |
| | | Zone4_in_well +=data |
| | | break |
| | | |
| | | #L3 lateral recharge (positive): sum the IN-WELLS items over all periods for Zone 8 |
| | | Zone8_in_well =0 |
| | | for key in dicts: |
| | | if key.startswith("8,"): |
| | | array = dicts[key] |
| | | for item in array: |
| | | if item.startswith("WELLS") : |
| | | strs = item.replace(" ", "").replace("WELLS=", "") |
| | | data = float(strs) |
| | | Zone8_in_well +=data |
| | | break |
| | | |
| | | #artificial extraction (negative): sum the OUT-WELLS items over all periods for Zone 5 |
| | | zone5out =0 |
| | | for key in dicts: |
| | | if key.startswith("5,"): |
| | | array = dicts[key] |
| | | for item in array: |
| | | if item.startswith("WELLS") : |
| | | strs = item.replace(" ", "").replace("WELLS=", "") |
| | | data = float(strs) |
| | | zone5out +=data |
| | | |
| | | #L1 lateral outflow (negative): sum the OUT-WELLS items over all periods for Zone 4 |
| | | Zone4_out_well=0 |
| | | for key in dicts: |
| | | if key.startswith("4,"): |
| | | array = dicts[key] |
| | | for item in array: |
| | | if item.startswith("WELLS") : |
| | | strs = item.replace(" ", "").replace("WELLS=", "") |
| | | data = float(strs) |
| | | Zone4_out_well +=data |
| | | |
| | | # L3 = L3 lateral outflow (negative) |
| | | L3=0.0 |
| | | result =[recharge,zone3,Zone4_in_well,Zone8_in_well,zone5out,Zone4_out_well,L3] |
| | | for i in range(len(result)): |
| | | # each row actually covers one step, and 1 step = 10 days; values are converted to 1e8 m3 |
| | | result[i]= round(result[i]/100000000*10, 4) |
| | | |
| | | return result |
| | | |
| | | |
| | | #parse the water-resources output |
| | | def water_res(model_name,paths): |
| | | data= read_txt(paths) |
| | | lens = len(data) |
| | | index = 0 |
| | | segment=[] |
| | | dicts={} |
| | | flag = 0 |
| | | title="" |
| | | while index < lens: |
| | | strs = data[index].strip() |
| | | if strs.startswith("Flow Budget for Zone"): |
| | | segment=[] |
| | | flag = 1 |
| | | title=strs |
| | | if strs.startswith("Percent Discrepancy"): |
| | | segment.append(strs) |
| | | numbers = reg_find_int(title) |
| | | key = ','.join(numbers) |
| | | dicts[key]=segment |
| | | flag = 0 |
| | | if flag ==1 : |
| | | segment.append(strs) |
| | | index=index+1 |
| | | |
| | | # atmospheric precipitation: sum all RECHARGE items in zone 1 (both IN and OUT) |
| | | zone1_rechage = 0 |
| | | |
| | | for key in dicts: |
| | | if key.startswith("1,"): |
| | | array = dicts[key] |
| | | temp=[] |
| | | for item in array: |
| | | if item.startswith("RECHARGE") : |
| | | strs = item.replace(" ", "").replace("RECHARGE=", "") |
| | | temp.append(float(strs)) |
| | | |
| | | zone1_rechage += (temp[0]-temp[1]) |
| | | |
| | | #Yongding River leakage: sum all zone 2 exchange items in zone 1 (both IN and OUT) |
| | | zone1_well = 0 |
| | | for key in dicts: |
| | | if key.startswith("1,"): |
| | | array = dicts[key] |
| | | indata,outdata= 0,0 |
| | | for item in array: |
| | | if item.startswith("Zone 2 to 1") : |
| | | strs = item.replace(" ", "").replace("Zone2to1=", "") |
| | | indata = float(strs) |
| | | |
| | | if item.startswith("Zone 1 to 2") : |
| | | strs = item.replace(" ", "").replace("Zone1to2=", "") |
| | | outdata = float(strs) |
| | | |
| | | zone1_well += (indata-outdata) |
| | | |
| | | #lateral inflow: sum all zone 8 exchange items in zone 7 (both IN and OUT) |
| | | zone7=0 |
| | | for key in dicts: |
| | | if key.startswith("7,"): |
| | | array = dicts[key] |
| | | indata,outdata= 0,0 |
| | | for item in array: |
| | | if item.startswith("Zone 8 to 7") : |
| | | strs = item.replace(" ", "").replace("Zone8to7=", "") |
| | | indata = float(strs) |
| | | |
| | | if item.startswith("Zone 7 to 8") : |
| | | strs = item.replace(" ", "").replace("Zone7to8=", "") |
| | | outdata = float(strs) |
| | | |
| | | zone7 += (indata-outdata) |
| | | |
| | | #spring overflow: sum all zone 7 exchange items in zone 6 (both IN and OUT) |
| | | zone6 =0 |
| | | for key in dicts: |
| | | if key.startswith("6,"): |
| | | array = dicts[key] |
| | | indata,outdata= 0,0 |
| | | for item in array: |
| | | if item.startswith("Zone 7 to 6") : |
| | | strs = item.replace(" ", "").replace("Zone7to6=", "") |
| | | indata = float(strs) |
| | | |
| | | if item.startswith("Zone 6 to 7") : |
| | | strs = item.replace(" ", "").replace("Zone6to7=", "") |
| | | outdata = float(strs) |
| | | zone6 += (indata-outdata) |
| | | |
| | | result =[zone1_rechage,zone1_well,zone7,zone6] |
| | | for i in range(len(result)): |
| | | result[i]= round(result[i]/100000000*10,4) |
| | | return result |
| | | |
| | | |
| | | #parse the water-resources output for a single stress period |
| | | def water_res_month(model_name,paths,per): |
| | | data= read_txt(paths) |
| | | lens = len(data) |
| | | index = 0 |
| | | segment=[] |
| | | dicts={} |
| | | flag = 0 |
| | | title="" |
| | | while index < lens: |
| | | strs = data[index].strip() |
| | | if strs.startswith("Flow Budget for Zone"): |
| | | segment=[] |
| | | flag = 1 |
| | | title=strs |
| | | if strs.startswith("Percent Discrepancy"): |
| | | segment.append(strs) |
| | | numbers = reg_find_int(title) |
| | | key = ','.join(numbers) |
| | | dicts[key]=segment |
| | | flag = 0 |
| | | if flag ==1 : |
| | | segment.append(strs) |
| | | index=index+1 |
| | | |
| | | # atmospheric precipitation: sum the RECHARGE items for zone 1 (both IN and OUT) |
| | | zone1_rechage = 0 |
| | | zone1_keys=[ "1,1,"+str(per+1),"1,2,"+str(per+1),"1,3,"+str(per+1)] |
| | | for key in zone1_keys: |
| | | array = dicts[key] |
| | | temp=[] |
| | | for item in array: |
| | | if item.startswith("RECHARGE") : |
| | | strs = item.replace(" ", "").replace("RECHARGE=", "") |
| | | temp.append(float(strs)) |
| | | zone1_rechage += (temp[0]-temp[1]) |
| | | |
| | | #Yongding River leakage: sum the zone 2 exchange items in zone 1 (both IN and OUT) |
| | | zone1_well = 0 |
| | | zone1_well_keys=["1,1,"+str(per+1),"1,2,"+str(per+1),"1,3,"+str(per+1)] |
| | | for key in zone1_well_keys: |
| | | array = dicts[key] |
| | | indata,outdata= 0,0 |
| | | for item in array: |
| | | if item.startswith("Zone 2 to 1") : |
| | | strs = item.replace(" ", "").replace("Zone2to1=", "") |
| | | indata = float(strs) |
| | | |
| | | if item.startswith("Zone 1 to 2") : |
| | | strs = item.replace(" ", "").replace("Zone1to2=", "") |
| | | outdata = float(strs) |
| | | |
| | | zone1_well += (indata-outdata) |
| | | |
| | | #lateral inflow: sum the zone 8 exchange items in zone 7 (both IN and OUT) |
| | | zone7=0 |
| | | zone7_keys=["7,1,"+str(per+1),"7,2,"+str(per+1),"7,3,"+str(per+1)] |
| | | for key in zone7_keys: |
| | | array = dicts[key] |
| | | indata,outdata= 0,0 |
| | | for item in array: |
| | | if item.startswith("Zone 8 to 7") : |
| | | strs = item.replace(" ", "").replace("Zone8to7=", "") |
| | | indata = float(strs) |
| | | |
| | | if item.startswith("Zone 7 to 8") : |
| | | strs = item.replace(" ", "").replace("Zone7to8=", "") |
| | | outdata = float(strs) |
| | | zone7 += (indata-outdata) |
| | | |
| | | #spring overflow: sum the zone 7 exchange items in zone 6 (both IN and OUT) |
| | | zone6 =0 |
| | | zone6_keys=["6,1,"+str(per+1),"6,2,"+str(per+1),"6,3,"+str(per+1)] |
| | | for key in zone6_keys: |
| | | array = dicts[key] |
| | | indata,outdata= 0,0 |
| | | for item in array: |
| | | if item.startswith("Zone 7 to 6") : |
| | | strs = item.replace(" ", "").replace("Zone7to6=", "") |
| | | indata = float(strs) |
| | | |
| | | if item.startswith("Zone 6 to 7") : |
| | | strs = item.replace(" ", "").replace("Zone6to7=", "") |
| | | outdata = float(strs) |
| | | zone6 += (indata-outdata) |
| | | |
| | | result =[zone1_rechage,zone1_well,zone7,zone6] |
| | | for i in range(len(result)): |
| | | result[i]= round(result[i]/100000000*10, 4) |
| | | |
| | | return result |
| | | |
| | | #water levels across multiple scenario models |
| | | def water_depth(model_name): |
| | | name_array = model_name.split(",") |
| | | |
| | | yhyMatrix = np.loadtxt(base.water_yhy_path, dtype=str,encoding='utf-8') |
| | | lshMatrix = np.loadtxt(base.water_lsh_path, dtype=str,encoding='utf-8') |
| | | dbwMatrix = np.loadtxt(base.water_dbw_path, dtype=str,encoding='utf-8') |
| | | |
| | | res ={} |
| | | #horizontal baseline (surface elevation) for 颐和园/莲石湖/东北旺 plus one curve per selected model |
| | | yhydata=[] |
| | | base1={"name":"地表高程","data":[52]*12} |
| | | yhydata.append(base1) |
| | | |
| | | lshdata=[] |
| | | base2={"name":"地表高程","data":[80]*12} |
| | | lshdata.append(base2) |
| | | |
| | | dbwdata=[] |
| | | base3={"name":"地表高程","data":[49]*12} |
| | | dbwdata.append(base3) |
| | | |
| | | months = ModelPeriod.get_months_in_range_ym("2023-01","2023-12") |
| | | |
| | | for i in range(len(name_array)): |
| | | if name_array[i] != '': |
| | | |
| | | index = muiltyModelIndex(name_array[i]) |
| | | array1 = get_column(yhyMatrix,index) |
| | | array2 = get_column(lshMatrix,index) |
| | | array3 = get_column(dbwMatrix,index) |
| | | |
| | | yhydata.append({"name":name_array[i],"data":convertColumnData(array1)}) |
| | | lshdata.append({"name":name_array[i],"data":convertColumnData(array2)}) |
| | | dbwdata.append({"name":name_array[i],"data":convertColumnData(array3)}) |
| | | |
| | | rchMatrix = np.loadtxt(base.prefix + "base_water.ini", dtype=str,encoding='utf-8') |
| | | riverMatrix = np.loadtxt(base.prefix + "base_river.ini", dtype=str,encoding='utf-8') |
| | | pumpMatrix = np.loadtxt(base.prefix + "base_mining.ini", dtype=str,encoding='utf-8') |
| | | |
| | | rchdata=[] |
| | | rch_base1 = rchMatrix[1] |
| | | rch_base1_float =[] |
| | | for i in range (0,len(rch_base1)): |
| | | float_data = round(float(rch_base1[i])/9,2) |
| | | rch_base1_float.append(float_data) |
| | | |
| | | rchdata.append({"name":"基准值","data":rch_base1_float}) |
| | | |
| | | riverdata=[] |
| | | riverdata.append({"name":"基准值","data":riverMatrix.astype(float).tolist()}) |
| | | |
| | | pumpdata=[] |
| | | pumpX=pumpMatrix[1] |
| | | pump_float=[] |
| | | for i in range (0,len(pumpX)): |
| | | float_data = round(float(pumpX[i]),2) |
| | | pump_float.append(float_data) |
| | | |
| | | pumpdata.append({"name":"基准值","data":pump_float}) |
| | | |
| | | res["xAxis"] = months |
| | | res["yhy_line"] = yhydata |
| | | res["lsh_line"] = lshdata |
| | | res["dbw_line"] = dbwdata |
| | | |
| | | |
| | | for i in range(len(name_array)): |
| | | if name_array[i] != '': |
| | | rchdata.append(rchBaseResult(rchMatrix,name_array[i])) |
| | | riverdata.append(riverBaseResult(riverMatrix, name_array[i])) |
| | | pumpdata.append(pumpBaseResult(pumpMatrix, name_array[i])) |
| | | |
| | | res["rch_line"] = rchdata |
| | | res["river_line"] = riverdata |
| | | res["pump_line"] = pumpdata |
| | | |
| | | yqsdata=[] |
| | | pyqdata=[] |
| | | sqdata=[] |
| | | for i in range(len(name_array)): |
| | | if name_array[i] != '': |
| | | paths = base.muiltyModel + name_array[i] + "\\modflow.head" |
| | | resdata = AchiveReport.getXs3LineChart(paths) |
| | | pyqdata.append({"name":name_array[i],"data":resdata["pyq"]}) |
| | | sqdata.append({"name":name_array[i],"data":resdata["sq"]}) |
| | | yqsdata.append({"name":name_array[i],"data":resdata["yqs"]}) |
| | | |
| | | res["yqs_line"] = yqsdata |
| | | res["sq_line"] = sqdata |
| | | res["pyq_line"] = pyqdata |
| | | |
| | | return res |
| | | |
| | | #per-zone water levels and month-over-month change |
| | | def xs_depth(model_name): |
| | | res={} |
| | | line1,line2=[],[] |
| | | paths = base.model_dir + model_name + "\\modflow.head" |
| | | if model_name == base.not_allowed_model: |
| | | paths = base.baseModel2 + "\\modflow.head" |
| | | |
| | | resdata = AchiveReport.getXs3LineChart(paths) |
| | | line1.append({"name":"平原区","data":roundArray(resdata["pyq"])}) |
| | | line1.append({"name":"山区","data":roundArray(resdata["sq"])}) |
| | | line1.append({"name":"玉泉山地区","data":roundArray(resdata["yqs"])}) |
| | | res["depth"] = line1 |
| | | |
| | | line2.append({"name":"平原区","data":xs_bf(resdata["pyq"])}) |
| | | line2.append({"name":"山区","data":xs_bf(resdata["sq"])}) |
| | | line2.append({"name":"玉泉山地区","data":xs_bf(resdata["yqs"])}) |
| | | res["bf"] = line2 |
| | | return res |
| | | |
| | | def xs_bf(array): |
| | | newlist=[] |
| | | newlist.append(0) |
| | | lens = len(array)-1 |
| | | for i in range(0,lens): |
| | | x = array[i+1]-array[i] |
| | | newlist.append(round(x,2)) |
| | | return newlist |
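| | | |
| | | #xs_bf turns a level series into month-over-month change with a leading 0: |
| | | #   xs_bf([10.0, 10.4, 10.1])   # -> [0, 0.4, -0.3] |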
| | | |
| | | def roundArray(array): |
| | | newlist=[] |
| | | for item in array: |
| | | item = round(item,2) |
| | | newlist.append(item) |
| | | return newlist |
| | | |
| | | #rch baseline data |
| | | def rchBaseResult(rchMatrix,sp): |
| | | rchDict ={} |
| | | rch_base1 = rchMatrix[1] |
| | | rch_base2 = rchMatrix[2] |
| | | rch_base3 = rchMatrix[3] |
| | | trump = getmuiltyModelparam(sp) |
| | | |
| | | types = trump[0] |
| | | rch_x = trump[1] |
| | | |
| | | if types ==1: |
| | | temp = muiltyArray(rch_base1,rch_x) |
| | | for i in range(0,len(temp)): |
| | | temp[i] =round(temp[i]/9,2) |
| | | rchDict={"name":sp,"data":temp} |
| | | if types ==2: |
| | | temp = muiltyArray(rch_base2,rch_x) |
| | | for i in range(0,len(temp)): |
| | | temp[i] =round(temp[i]/9,2) |
| | | rchDict={"name":sp,"data":temp} |
| | | if types ==3: |
| | | temp = muiltyArray(rch_base3,rch_x) |
| | | for i in range(0,len(temp)): |
| | | temp[i] =round(temp[i]/9,2) |
| | | rchDict={"name":sp,"data":temp} |
| | | |
| | | return rchDict |
| | | |
| | | #river baseline data |
| | | def riverBaseResult(riverMatrix,sp): |
| | | trump = getmuiltyModelparam(sp) |
| | | river_x = trump[2] |
| | | riverDict={"name":sp,"data":muiltyArray(riverMatrix,river_x)} |
| | | return riverDict |
| | | |
| | | |
| | | #pumping-well baseline data |
| | | def pumpBaseResult(pumpMatrix,sp): |
| | | trump = getmuiltyModelparam(sp) |
| | | pump_x =trump[3] |
| | | pumpDict={"name":sp,"data":muiltyArray(pumpMatrix[1],pump_x)} |
| | | return pumpDict |
| | | |
| | | |
| | | def muiltyArray(array,scale): |
| | | result =[] |
| | | for item in array: |
| | | x= round(float(item) * scale,2) |
| | | result.append(x) |
| | | return result |
| | | |
| | | |
| | | def convertColumnData(array): |
| | | result =[] |
| | | new_list=[] |
| | | for i in range(len(array)): |
| | | if i!= 0: |
| | | data = transToNum(array[i]) |
| | | result.append(data) |
| | | for index in range(len(result)): |
| | | if index % 3 == 0: |
| | | new_list.append(result[index]) |
| | | return new_list |
| | | |
| | | def transToNum(value): |
| | |     try: |
| | |         return round(float(value),2) |
| | |     except ValueError: |
| | |         return 0 |
| | | |
| | | |
| | | #get the index of a prediction scenario; read from the ini file in listed order |
| | | def muiltyModelIndex(name): |
| | | models= muiltyModelList() |
| | | indexs = models.index(name) |
| | | return indexs |
| | | |
| | | #list of prediction scenario models |
| | | def muiltyModelList(): |
| | | models=["SP0-0","SP1-1","SP1-2","SP1-3","SP2-1","SP2-2","SP2-3","SP3-1", |
| | | "SP3-2","SP3-4","SP3-5","SP3-6","SP3-7","SP4-1","SP4-7"] |
| | | return models |
| | | |
| | | |
| | | |
| | | #NumPy: get the values of one column |
| | | def get_column(matrix, column_number): |
| | | column = matrix[:, column_number] |
| | | return column |
| | | |
| | | #tuple fields: 1=rainfall type, 2=rainfall factor, 3=river factor, 4=pumping factor |
| | | def getmuiltyModelparam(sp): |
| | | dicts={ |
| | | "SP0-0":(1,1,1,1), |
| | | "SP1-1":(2,1,1,1), |
| | | "SP1-2":(3,1,1,1), |
| | | "SP1-3":(3,1.2,1,1), |
| | | "SP2-1":(1,1,2,1), |
| | | "SP2-2":(1,1,5,1), |
| | | "SP2-3":(1,1,10,1), |
| | | "SP3-1":(1,1,1,0.25), |
| | | "SP3-2":(1,1,1,0.5), |
| | | "SP3-4":(1,1,1,0), |
| | | "SP3-5":(1,1,1,0.4), |
| | | "SP3-6":(1,1,1,0.3), |
| | | "SP3-7":(1,1,1,0.6), |
| | | |
| | | "SP4-1":(1,1,2,0.5), |
| | | "SP4-7":(3,1.2,10,0)} |
| | | return dicts[sp] |
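| | | |
| | | #Sketch: scenario "SP2-2" maps to (1, 1, 5, 1), i.e. baseline rainfall with |
| | | #the river series scaled 5x; riverBaseResult multiplies the baseline river |
| | | #matrix by trump[2] == 5, and pumpBaseResult scales pumping by trump[3] == 1. |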
New file |
| | |
| | | import cx_Oracle |
| | | import pymssql |
| | | from datetime import datetime, timedelta |
| | | import numpy as np |
| | | # select * from swzdh.rain; |
| | | # select * from swzdh.river; |
| | | # select * from swzdh.gw; |
| | | |
| | | |
| | | |
| | | #Mayu (麻峪) rainfall station |
| | | mayu_rainfall="30747850" |
| | | |
| | | #Longjiazhuang (陇驾庄) hydrological station |
| | | ljz_swz="30700450" |
| | | |
| | | #gw wells: LQWB（龙泉）/ZSSC（再生水厂）/WTY（梧桐苑）/LSH（莲石湖）/HZZ（侯庄子）/GC（古城） |
| | | arr1=["30773702","30769360","30769280","30567304","30769320","30567303"] |
| | | |
| | | |
| | | haidian_rainfall="xxxx" |
| | | |
| | | #gw wells: SXC（双兴村93）/XM1（颐和园西门1）/XM2（颐和园西门2）/SYSC（水源三厂336）/SJYY（四季御园2）/BW（北坞） |
| | | arr2=["30566324","30565080","30564400","30566335","30564840","30564880"] |
| | | |
| | | #get the data needed for the 15-day Yongding River forecast |
| | | def get_ydh15_real_data(): |
| | | result =[] |
| | | |
| | | current_time = datetime.now() |
| | | start_time = current_time- timedelta(days=60) |
| | | times=[] |
| | | #time axis |
| | | for i in range(75): |
| | | tmp = start_time + timedelta(days=i) |
| | | times.append(tmp.strftime("%Y-%m-%d")) |
| | | |
| | | #start and end time strings |
| | | start_str = start_time.strftime("%Y-%m-%d") |
| | | end_str= current_time.strftime("%Y-%m-%d") |
| | | |
| | | |
| | | |
| | | #Mayu rainfall data |
| | | mayu_data = get_data("rain",mayu_rainfall,start_str,end_str) |
| | | mayu_dict={} |
| | | for i in range(len(mayu_data)): |
| | | time = mayu_data[i]["time"] |
| | | value = mayu_data[i]["value"] |
| | | if time not in mayu_dict: |
| | | mayu_dict[time] = value |
| | | |
| | | |
| | | #Longjiazhuang river data |
| | | ljz_data = get_data("river",ljz_swz,start_str,end_str) |
| | | ljz_dict ={} |
| | | for i in range(len(ljz_data)): |
| | | time = ljz_data[i]["time"] |
| | | value = ljz_data[i]["value"] |
| | | if time not in ljz_dict: |
| | | ljz_dict[time] = value |
| | | |
| | | mayu_value=[] |
| | | ljz_value=[] |
| | | |
| | | for i in range(len(times)): |
| | | tmp = times[i] |
| | | if tmp in mayu_dict: |
| | | mayu_value.append(mayu_dict[tmp]) |
| | | else: |
| | | mayu_value.append(0) |
| | | |
| | | if tmp in ljz_dict: |
| | | ljz_value.append(ljz_dict[tmp]) |
| | | else: |
| | | ljz_value.append(0) |
| | | |
| | | |
| | | result.append(times) |
| | | result.append(mayu_value) |
| | | result.append(ljz_value) |
| | | |
| | | for i in range(len(arr1)): |
| | | data = get_data("gw",arr1[i],start_str,end_str) |
| | | dictx ={} |
| | | tmp_arr=[] |
| | | for j in range(len(data)): |
| | | time = data[j]["time"] |
| | | value = data[j]["value"] |
| | | if time not in dictx: |
| | | dictx[time] = value |
| | | |
| | | |
| | | for j in range(len(times)): |
| | | tmp = times[j] |
| | | if tmp in dictx: |
| | | tmp_arr.append(dictx[tmp]) |
| | | else: |
| | | tmp_arr.append(0) |
| | | |
| | | result.append(tmp_arr) |
| | | |
| | | np_arr = np.asarray(result) |
| | | #transpose so each row is one day |
| | | np_result = np_arr.T |
| | | |
| | | return np_result |
| | | |
| | | |
| | | #Fetch the data needed for the Yuquanshan 15-day forecast |
| | | def getyqs15_real_data(): |
| | | |
| | | |
| | | result =[] |
| | | |
| | | current_time = datetime.now() |
| | | start_time = current_time- timedelta(days=60) |
| | | times=[] |
| | | #time axis |
| | | for i in range(75): |
| | | tmp = start_time + timedelta(days=i) |
| | | times.append(tmp.strftime("%Y-%m-%d")) |
| | | |
| | | #start and end time strings |
| | | start_str = start_time.strftime("%Y-%m-%d") |
| | | end_str= current_time.strftime("%Y-%m-%d") |
| | | |
| | | |
| | | |
| | | #Haidian rainfall data |
| | | hd_data = get_data("rain",haidian_rainfall,start_str,end_str) |
| | | hd_dict={} |
| | | for i in range(len(hd_data)): |
| | | time = hd_data[i]["time"] |
| | | value = hd_data[i]["value"] |
| | | if time not in hd_dict: |
| | | hd_dict[time] = value |
| | | |
| | | |
| | | #LJZ river stage data |
| | | ljz_data = get_data("river",ljz_swz,start_str,end_str) |
| | | ljz_dict ={} |
| | | for i in range(len(ljz_data)): |
| | | time = ljz_data[i]["time"] |
| | | value = ljz_data[i]["value"] |
| | | if time not in ljz_dict: |
| | | ljz_dict[time] = value |
| | | |
| | | hd_value=[] |
| | | ljz_value=[] |
| | | |
| | | for i in range(len(times)): |
| | | tmp = times[i] |
| | | if tmp in hd_dict: |
| | | hd_value.append(hd_dict[tmp]) |
| | | else: |
| | | hd_value.append(0) |
| | | |
| | | if tmp in ljz_dict: |
| | | ljz_value.append(ljz_dict[tmp]) |
| | | else: |
| | | ljz_value.append(0) |
| | | |
| | | |
| | | result.append(times) |
| | | result.append(hd_value) |
| | | result.append(ljz_value) |
| | | |
| | | for i in range(len(arr2)): |
| | | data = get_data("gw",arr2[i],start_str,end_str) |
| | | dictx ={} |
| | | tmp_arr=[] |
| | | for j in range(len(data)): |
| | | time = data[j]["time"] |
| | | value = data[j]["value"] |
| | | if time not in dictx: |
| | | dictx[time] = value |
| | | |
| | | |
| | | for j in range(len(times)): |
| | | tmp = times[j] |
| | | if tmp in dictx: |
| | | tmp_arr.append(dictx[tmp]) |
| | | else: |
| | | tmp_arr.append(0) |
| | | |
| | | result.append(tmp_arr) |
| | | |
| | | np_arr = np.asarray(result) |
| | | #transpose so each row is one day |
| | | np_result = np_arr.T |
| | | |
| | | return np_result |
| | | |
| | | |
| | | |
| | | #################################################### |
| | | |
| | | def get_data(types,num,start_time,end_time): |
| | | |
| | | if types =='river': |
| | | return river_list(start_time,end_time,num) |
| | | if types =='rain': |
| | | return rain_list(start_time,end_time,num) |
| | | |
| | | if types =='gw': |
| | | return gw_list(start_time,end_time,num) |
| | | |
| | | #River stage (hydrological station) |
| | | def river_list(start_time,end_time,STCD): |
| | | # connect to the Oracle database |
| | | connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh') |
| | | # create a cursor |
| | | cursor = connection.cursor() |
| | | |
| | | sql = """ |
| | | SELECT * FROM swzdh.river |
| | | WHERE STCD = :STCD and tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD') AND TO_DATE(:end_time, 'YYYY-MM-DD') order by tm ASC |
| | | """ |
| | | res =[] |
| | | try: |
| | | cursor.execute(sql, {'start_time': start_time, 'end_time': end_time,'STCD':STCD}) |
| | | |
| | | column_names = [row[0] for row in cursor.description] |
| | | print("columns:", column_names) |
| | | |
| | | # fetch all rows |
| | | result = cursor.fetchall() |
| | | |
| | | # build the result list |
| | | for row in result: |
| | | times = row[2] |
| | | date_str = times.strftime("%Y-%m-%d") |
| | | dicts={"time":date_str,"value":row[4],"stname":row[1]} |
| | | res.append(dicts) |
| | | |
| | | except Exception as e: |
| | | print("Error occurred: ", str(e)) |
| | | finally: |
| | | # close the cursor and connection |
| | | cursor.close() |
| | | connection.close() |
| | | return res |
| | | |
| | | #Rainfall amount |
| | | def rain_list(start_time,end_time,STCD): |
| | | # connect to the Oracle database |
| | | connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh') |
| | | # create a cursor |
| | | cursor = connection.cursor() |
| | | |
| | | sql = """ |
| | | SELECT * FROM swzdh.rain |
| | | WHERE STCD = :STCD and tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD') AND TO_DATE(:end_time, 'YYYY-MM-DD') order by tm ASC |
| | | """ |
| | | res =[] |
| | | try: |
| | | cursor.execute(sql, {'start_time': start_time, 'end_time': end_time,'STCD':STCD}) |
| | | |
| | | column_names = [row[0] for row in cursor.description] |
| | | print("columns:", column_names) |
| | | |
| | | # fetch all rows |
| | | result = cursor.fetchall() |
| | | print(result) |
| | | |
| | | date_str_arr=[] |
| | | date_str_dict={} |
| | | # aggregate sub-daily records into daily totals |
| | | name ="" |
| | | for row in result: |
| | | name= row[1] |
| | | times = row[2] |
| | | date_str = times.strftime("%Y-%m-%d") |
| | | R = row[3] |
| | | if date_str in date_str_dict: |
| | | date_str_dict[date_str] = date_str_dict[date_str] + R |
| | | else: |
| | | date_str_dict[date_str] = R |
| | | date_str_arr.append(date_str) |
| | | for item in date_str_arr: |
| | | value = round(date_str_dict[item],2) |
| | | temp ={"time":item,"value":value,"stname":name} |
| | | res.append(temp) |
| | | |
| | | except Exception as e: |
| | | print("Error occurred: ", str(e)) |
| | | finally: |
| | | # close the cursor and connection |
| | | cursor.close() |
| | | connection.close() |
| | | return res |
| | | |
| | | |
| | | # Groundwater monitoring wells; two tables: V_WT_YRS and V_Z_YRS |
| | | def gw_list(start_time,end_time,STCD): |
| | | |
| | | conn = pymssql.connect(server='192.168.44.66', |
| | | user='xsyrs', |
| | | password='gws@xsyrs2024', |
| | | database='DB_DXS', |
| | | as_dict=True) |
| | | |
| | | cursor = conn.cursor() |
| | | res =[] |
| | | try: |
| | | sql = "SELECT * FROM V_Z_YRS where STCD = '" +STCD +"' and TM >= '"+start_time +"' and TM <= '"+end_time +"' order by TM ASC" |
| | | cursor.execute(sql) |
| | | result = cursor.fetchall() |
| | | |
| | | for row in result: |
| | | times = row["TS"] |
| | | date_str = times.strftime("%Y-%m-%d") |
| | | value = float(row["Z"]) |
| | | bd = float(row["BD"]) |
| | | dicts={"time":date_str,"value":value,"bd":bd} |
| | | res.append(dicts) |
| | | |
| | | |
| | | except Exception as e: |
| | | print("Error occurred:", str(e)) |
| | | finally: |
| | | cursor.close() |
| | | conn.close() |
| | | return res |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Wed Mar 20 14:44:36 2024 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | import numpy as np |
| | | import flopy |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import Base as base |
| | | import CalHead |
| | | |
| | | #1-based index ranges of the river segments in the WEL list |
| | | riv_seg={0:[454,479],1:[480,505],2:[506,527],3:[528,562]} |
| | | riv_seg_celles=[26,26,22,35] |
| | | #1-based index ranges of the boundary segments |
| | | ibound_seg={0:[1,86],1:[87,111],2:[112,142],3:[143,170],4:[171,240], |
| | | 5:[241,282],6:[283,354],7:[355,393],8:[394,436],9:[437,453]} |
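| | | |
| | | # Optional helper (a sketch, not in the original file): map a 1-based row |
| | | # counter from the WEL list to its segment id using the ranges above, e.g. |
| | | # segment_of(500, riv_seg) -> 1. The if/elif chains in river_update and |
| | | # ibound_update below could be written this way. |
| | | def segment_of(count, seg_dict): |
| | |     for seg_id, (start, end) in seg_dict.items(): |
| | |         if start <= count <= end: |
| | |             return seg_id |
| | |     return None |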
| | | |
| | | |
| | | |
| | | def base_excel(model_name,types): |
| | | if types =='降雨': |
| | | return rch_excel(model_name) |
| | | if types =='河流': |
| | | return river_excel(model_name) |
| | | if types =='开采量': |
| | | return well_excel(model_name) |
| | | if types =='边界': |
| | | return ibound_excel(model_name) |
| | | |
| | | return [] |
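| | | |
| | | # Hedged usage sketch: the dispatcher above is keyed by the Chinese sheet type |
| | | # sent by the front end; "my_model" is a hypothetical model folder name. |
| | | def _demo_base_excel(): |
| | |     # parses <model>/降雨.xlsx and pushes the values into the RCH package |
| | |     return base_excel("my_model", "降雨") |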
| | | |
| | | |
| | | #Rainfall Excel |
| | | def rch_excel(model_name): |
| | | paths = base.model_dir + model_name +"\\降雨.xlsx" |
| | | data=[] |
| | | if not os.path.exists(paths): |
| | | return data |
| | | wb = load_workbook(filename = paths) |
| | | ws = wb[wb.sheetnames[0]] |
| | | |
| | | for row in ws.iter_rows(): |
| | | tmp =[] |
| | | for cell in row: |
| | | tmp.append(cell.value) |
| | | data.append(tmp) |
| | | wb.close() |
| | | #analyze the rainfall and update the model |
| | | rch_analysis(data,model_name) |
| | | return data |
| | | |
| | | def rch_analysis(data_array,model_name): |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | #number of stress periods |
| | | periods_len= len(periods) |
| | | |
| | | array = np.asarray(data_array) |
| | | fid= array[1:17,2] |
| | | params = array[1:17,3] |
| | | float_params = np.asarray(params,dtype=float) |
| | | #slice rows 1-16, columns 4..col_last |
| | | col_last = 4 + periods_len |
| | | data = array[1:17,4:col_last] |
| | | float_data = np.asarray(data,dtype=float) |
| | | |
| | | for i in range(0,len(float_data)): |
| | | for j in range(0,len(float_data[i])): |
| | | float_data[i][j] = float_data[i][j] * float_params[i] /30/100 |
| | | |
| | | rch_update(float_data,fid,model_name,periods_len) |
| | | |
| | | #Update the model's RCH data |
| | | def rch_update(float_data,fids,model_name,periods_len): |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return "æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼" |
| | | model_ws = base.model_dir + model_name |
| | | mx = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws, exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | areas= base.getAreas() |
| | | |
| | | for per in range(periods_len): |
| | | #RCH array for one stress period |
| | | item = mx.rch.rech.__getitem__(kper=per) |
| | | array2d = item.get_value() |
| | | |
| | | per_data = float_data[:,per] |
| | | print(per_data) |
| | | |
| | | for i in range(0,len(per_data)): |
| | | data =round(float(per_data[i]),8) |
| | | fid = fids[i] |
| | | tuples = areas[fid] |
| | | for entity in tuples: |
| | | x = entity[0] |
| | | y = entity[1] |
| | | array2d[x][y]= data |
| | | |
| | | mx.rch.rech.__setitem__(key=per, value=array2d) |
| | | |
| | | |
| | | rch = flopy.modflow.ModflowRch(mx,nrchop=mx.rch.nrchop, |
| | | ipakcb=mx.rch.ipakcb, |
| | | rech=mx.rch.rech, |
| | | irch =mx.rch.irch) |
| | | rch.write_file(check=False) |
| | | |
| | | |
| | | #Parse the river Excel data |
| | | def river_excel(model_name): |
| | | paths = base.model_dir + model_name +"\\河流.xlsx" |
| | | data=[] |
| | | if not os.path.exists(paths): |
| | | return data |
| | | wb = load_workbook(filename = paths) |
| | | ws = wb[wb.sheetnames[0]] |
| | | |
| | | for row in ws.iter_rows(): |
| | | tmp =[] |
| | | for cell in row: |
| | | tmp.append(cell.value) |
| | | data.append(tmp) |
| | | wb.close() |
| | | result =[] |
| | | for i in range(1,len(data)): |
| | | result.append(data[i]) |
| | | |
| | | river_analysis(result,model_name) |
| | | return result |
| | | |
| | | #Analyze the river data |
| | | def river_analysis(data_array,model_name): |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | row_last= 1 + periods_len |
| | | array = np.asarray(data_array) |
| | | data = array[1:row_last,1:2] |
| | | data2 = array[1:row_last,1:4] |
| | | |
| | | merge_array = np.concatenate((data,data2),axis=1) |
| | | |
| | | params = array[1:row_last,5:9] |
| | | |
| | | float_data = np.asarray(merge_array,dtype=float) |
| | | float_params = np.asarray(params,dtype=float) |
| | | |
| | | for i in range(0,len(float_data)): |
| | | for j in range(0,len(float_data[i])): |
| | | temp = round(float_data[i][j] * float_params[i][j] *30*86400 ,4) |
| | | last = round(temp/riv_seg_celles[j]/30,4) |
| | | float_data[i][j]=last |
| | | print(float_data) |
| | | river_update(float_data,model_name,periods_len) |
| | | |
| | | |
| | | #Update the river data in the WEL file |
| | | def river_update(float_data,model_name,periods_len): |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return "æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼" |
| | | |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws, exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | #rebuild the WEL stress period data, e.g. |
| | | # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} |
| | | lrcq = {} |
| | | |
| | | for per in range(periods_len): |
| | | |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=per) |
| | | #river values for each segment |
| | | segment0 = float_data[per][0] |
| | | segment1 = float_data[per][1] |
| | | segment2 = float_data[per][2] |
| | | segment3 = float_data[per][3] |
| | | |
| | | #rows for this stress period |
| | | array2d = [] |
| | | count = 1 |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [] |
| | | |
| | | if count>= riv_seg[0][0] and count <= riv_seg[0][1]: |
| | | array = [Layer, Row, Column, segment0] |
| | | |
| | | elif count>= riv_seg[1][0] and count <= riv_seg[1][1]: |
| | | array = [Layer, Row, Column, segment1] |
| | | |
| | | elif count>= riv_seg[2][0] and count <= riv_seg[2][1]: |
| | | array = [Layer, Row, Column, segment2] |
| | | |
| | | elif count>= riv_seg[3][0] and count <= riv_seg[3][1]: |
| | | array = [Layer, Row, Column, segment3] |
| | | else: |
| | | array = [Layer, Row, Column, Q] |
| | | |
| | | array2d.append(array) |
| | | count +=1 |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | |
| | | flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb, |
| | | dtype=ml.wel.dtype, |
| | | options=ml.wel.options, |
| | | stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | |
| | | |
| | | #Parse the pumping (extraction) Excel data |
| | | def well_excel(model_name): |
| | | paths = base.model_dir + model_name +"\\开采量.xlsx" |
| | | data=[] |
| | | if not os.path.exists(paths): |
| | | return data |
| | | wb = load_workbook(filename = paths) |
| | | |
| | | ws = wb[wb.sheetnames[0]] |
| | | |
| | | for row in ws.iter_rows(): |
| | | tmp =[] |
| | | for cell in row: |
| | | tmp.append(cell.value) |
| | | data.append(tmp) |
| | | wb.close() |
| | | result =[] |
| | | for i in range(1,len(data)): |
| | | result.append(data[i]) |
| | | well_analysis(result,model_name) |
| | | return result |
| | | |
| | | |
| | | #Analyze the pumping data |
| | | def well_analysis(data_array,model_name): |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | row_last= 1 + periods_len |
| | | |
| | | array = np.asarray(data_array) |
| | | data = array[1:row_last,1:5] |
| | | float_data= np.asarray(data,dtype=float) |
| | | |
| | | #transpose the array |
| | | zz_array = float_data.transpose() |
| | | |
| | | zz_data = [] |
| | | for i in range(50): |
| | | zz_data.append(zz_array[0]) |
| | | for i in range(49): |
| | | zz_data.append(zz_array[1]) |
| | | for i in range(18): |
| | | zz_data.append(zz_array[2]) |
| | | for i in range(12): |
| | | zz_data.append(zz_array[3]) |
| | | zz_data.append(zz_array[0]) |
| | | |
| | | np_data = np.asarray(zz_data,dtype=float) |
| | | |
| | | well_scale = np.loadtxt(base.well_scale_path, dtype=str) |
| | | float_scale= np.asarray(well_scale,dtype=float) |
| | | |
| | | ##per-well pumping = district extraction * scale ratio * 10000 / (well count * 30) |
| | | for i in range(0,len(np_data)): |
| | | for j in range(0,len(np_data[i])): |
| | | tmp = np_data[i][j] * float_scale[i][0]*10000/( float_scale[i][1] *30) |
| | | np_data[i][j] = round(tmp,4) |
| | | |
| | | well_update(np_data,model_name,periods_len) |
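| | | |
| | | # Worked example of the formula above (a sketch; the units are assumed, not |
| | | # stated in the source): a district extracting 6.0 (x10^4 m^3 per period), |
| | | # with a hypothetical scale-file row of ratio 0.9 and 100 wells over 30 days: |
| | | def _demo_per_well_pumping(): |
| | |     district_total = 6.0 # x10^4 m^3 in the stress period |
| | |     ratio, wells = 0.9, 100.0 # hypothetical scale-file row |
| | |     q = district_total * ratio * 10000 / (wells * 30) |
| | |     return round(q, 4) # -> 18.0, interpreted as m^3/day per well |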
| | | |
| | | |
| | | #Update the pumping rates in the WEL file |
| | | def well_update(np_data,model_name,periods_len): |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return "æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼" |
| | | |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws, exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | #rebuild the WEL stress period data, e.g. |
| | | # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} |
| | | lrcq = {} |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=per) |
| | | |
| | | per_data = np_data[:,per] |
| | | |
| | | #rows for this stress period |
| | | array2d = [] |
| | | |
| | | #the first 562 rows in the WEL file are not pumping wells; keep them as-is |
| | | # Layer = wel[i][0] |
| | | # Row = wel[i][1] |
| | | # Col = wel[i][2] |
| | | # Q = wel[i][3] |
| | | for i in range(0,562): |
| | | array = [wel[i][0],wel[i][1], wel[i][2],wel[i][3]] |
| | | array2d.append(array) |
| | | |
| | | for i in range(562,len(wel)): |
| | | indexid = i-562 |
| | | update_data=per_data[indexid] |
| | | array = [wel[i][0],wel[i][1], wel[i][2],update_data] |
| | | array2d.append(array) |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,dtype=ml.wel.dtype, |
| | | options=ml.wel.options, |
| | | stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | |
| | | |
| | | |
| | | #Parse the boundary Excel data |
| | | def ibound_excel(model_name): |
| | | paths = base.model_dir + model_name +"\\边界.xlsx" |
| | | data=[] |
| | | if not os.path.exists(paths): |
| | | return data |
| | | wb = load_workbook(filename = paths) |
| | | |
| | | ws = wb[wb.sheetnames[0]] |
| | | |
| | | for row in ws.iter_rows(): |
| | | tmp =[] |
| | | for cell in row: |
| | | tmp.append(cell.value) |
| | | data.append(tmp) |
| | | wb.close() |
| | | result =[] |
| | | for i in range(1,len(data)): |
| | | result.append(data[i]) |
| | | |
| | | np_array = ibound_analysis(result,model_name) |
| | | |
| | | #array of rows for the page display |
| | | view_data = ibound_view_data(np_array) |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | ibound_update(np_array,model_name,periods_len) |
| | | |
| | | return view_data |
| | | |
| | | |
| | | |
| | | |
| | | #Update the boundary flows in the WEL file |
| | | def ibound_update(np_array,model_name,periods_len): |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return "æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼" |
| | | |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws, exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | #rebuild the WEL stress period data, e.g. |
| | | # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} |
| | | lrcq = {} |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=per) |
| | | per_data = np_array[:,per] |
| | | |
| | | #rows for this stress period |
| | | array2d = [] |
| | | |
| | | count = 1 |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [] |
| | | |
| | | if count>= ibound_seg[0][0] and count <= ibound_seg[0][1]: |
| | | array = [Layer, Row, Column, per_data[0]] |
| | | |
| | | elif count>= ibound_seg[1][0] and count <= ibound_seg[1][1]: |
| | | array = [Layer, Row, Column, per_data[1]] |
| | | |
| | | elif count>= ibound_seg[2][0] and count <= ibound_seg[2][1]: |
| | | array = [Layer, Row, Column, per_data[2]] |
| | | |
| | | elif count>= ibound_seg[3][0] and count <= ibound_seg[3][1]: |
| | | array = [Layer, Row, Column, per_data[3]] |
| | | |
| | | elif count>= ibound_seg[4][0] and count <= ibound_seg[4][1]: |
| | | array = [Layer, Row, Column, per_data[4]] |
| | | |
| | | elif count>= ibound_seg[5][0] and count <= ibound_seg[5][1]: |
| | | array = [Layer, Row, Column, per_data[5]] |
| | | |
| | | elif count>= ibound_seg[6][0] and count <= ibound_seg[6][1]: |
| | | array = [Layer, Row, Column, per_data[6]] |
| | | |
| | | elif count>= ibound_seg[7][0] and count <= ibound_seg[7][1]: |
| | | array = [Layer, Row, Column, per_data[7]] |
| | | |
| | | elif count>= ibound_seg[8][0] and count <= ibound_seg[8][1]: |
| | | array = [Layer, Row, Column, per_data[8]] |
| | | |
| | | elif count>= ibound_seg[9][0] and count <= ibound_seg[9][1]: |
| | | array = [Layer, Row, Column, per_data[9]] |
| | | |
| | | else: |
| | | array = [Layer, Row, Column, Q] |
| | | |
| | | array2d.append(array) |
| | | count +=1 |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb, |
| | | dtype=ml.wel.dtype, |
| | | options=ml.wel.options, |
| | | stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | |
| | | |
| | | #Boundary analysis |
| | | def ibound_analysis(data_array,model_name): |
| | | array = np.asarray(data_array) |
| | | data = array[1:10,2:14] |
| | | #rows: the nine boundary rainfall stations; columns: 12 stress periods |
| | | float_data= np.asarray(data,dtype=float) |
| | | |
| | | # inflow 1-1: sum of stations 1-6 |
| | | result =[] |
| | | data1= float_data[[0,1,2,3,4,5]] |
| | | |
| | | sum1 = data1.sum(axis = 0) |
| | | |
| | | result.append(sum1) |
| | | |
| | | # inflow 1-2: station 3 |
| | | data2= float_data[2] |
| | | |
| | | result.append(data2) |
| | | # inflow 1-3: station 1 |
| | | data3 = float_data[0] |
| | | result.append(data3) |
| | | |
| | | # outflow 1-1: stations 1-2 |
| | | data4 = float_data[[0,1]] |
| | | sum4 = data4.sum(axis = 0) |
| | | result.append((-sum4)) |
| | | # outflow 1-2: station 1 |
| | | data5 = float_data[0] |
| | | result.append((-data5)) |
| | | |
| | | # inflow 3-1: station 1 |
| | | data6 = float_data[0] |
| | | result.append(data6) |
| | | |
| | | # inflow 3-2: stations 1-3 |
| | | data7 = float_data[[0,1,2]] |
| | | sum7 = data7.sum(axis = 0) |
| | | result.append(sum7) |
| | | |
| | | # inflow 3-3: same stations as 3-2 |
| | | # data8= float_data[[0,1,2]] |
| | | result.append(sum7) |
| | | # inflow 3-4: same stations as 3-2 |
| | | # data9= float_data[[0,1,2]] |
| | | result.append(sum7) |
| | | # inflow 3-5: same stations as 3-2 |
| | | # data10= float_data[[0,1,2]] |
| | | result.append(sum7) |
| | | |
| | | np_data = np.asarray(result,dtype=float) |
| | | |
| | | np_data = np.around(np_data, decimals=2) |
| | | |
| | | return np_data |
| | | |
| | | |
| | | def ibound_view_data(np_data): |
| | | |
| | | names=['流入1-1','流入1-2','流入1-3','流出1-1','流出1-2', |
| | | '流入3-1','流入3-2','流入3-3','流入3-4','流入3-5'] |
| | | row_sums = np.sum(np_data, axis=1) |
| | | row_sums= np.around(row_sums,2) |
| | | |
| | | params=[1,1,1,2.5,1,0.5,0.5,0.5,0.5,0.5] |
| | | cells=[86,47,31,28,50,42,72,39,43,17] |
| | | per=12 #number of stress periods |
| | | x=30 #days per stress period (assumed) |
| | | water=[] |
| | | for i in range(0,len(names)): |
| | | tmp = round( row_sums[i] * params[i] * cells[i] * per *x , 2) |
| | | water.append(tmp) |
| | | arr=[] |
| | | arr.append(names) |
| | | arr.append(row_sums) |
| | | arr.append(params) |
| | | arr.append(water) |
| | | str_np = np.asarray(arr,dtype=str) |
| | | zz= str_np.transpose() |
| | | |
| | | title =['边界','降雨量','系数','输入值'] |
| | | result =[] |
| | | result.append(title) |
| | | |
| | | for item in zz: |
| | | result.append(item) |
| | | |
| | | result = np.asarray(result,dtype=str).tolist() |
| | | |
| | | return result |
| | | |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Fri May 24 15:33:12 2024 |
| | | |
| | | @author: BDYGS |
| | | """ |
| | | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Tue Apr 23 05:28:13 2024 |
| | | |
| | | @author: BDYGS |
| | | """ |
| | | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Sat Apr 6 04:01:04 2024 |
| | | |
| | | @author: BDYGS |
| | | """ |
| | | |
| | | import matplotlib.pyplot as plt |
| | | import numpy as np |
| | | import pandas as pd |
| | | import torch |
| | | import torch.nn as nn |
| | | from torch.autograd import Variable |
| | | from sklearn.preprocessing import StandardScaler, MinMaxScaler |
| | | from sklearn.metrics import mean_squared_error |
| | | from sklearn.metrics import mean_absolute_error |
| | | from torch.utils.data import TensorDataset |
| | | from tqdm import tqdm |
| | | |
| | | |
| | | |
| | | class Config(): |
| | | data_path = "C:\\Users\\ZMK\\Desktop\\GRU\çæ³å±±äº.csv" |
| | | timestep = 60 # æ¶é´æ¥é¿ï¼å°±æ¯å©ç¨å¤å°æ¶é´çªå£ |
| | | batch_size = 30 # æ¹æ¬¡å¤§å° |
| | | feature_size = 8 # æ¯ä¸ªæ¥é¿å¯¹åºçç¹å¾æ°éï¼è¿éåªä½¿ç¨1ç»´ï¼æ¯å¤©çé£é |
| | | hidden_size = 256 # éå±å¤§å° |
| | | output_size = 15 # ç±äºæ¯åè¾åºä»»å¡ï¼æç»è¾åºå±å¤§å°ä¸º1ï¼é¢æµæªæ¥1天é£é |
| | | num_layers = 2 # gruç屿° |
| | | epochs = 100 # è¿ä»£è½®æ° |
| | | best_loss = 0 # è®°å½æå¤± |
| | | learning_rate = 0.0003 # å¦ä¹ ç |
| | | |
| | | |
| | | config = Config() |
| | | |
| | | |
| | | def normalization(data,label): |
| | | mm_x=MinMaxScaler() # scaler for the input features |
| | | mm_y=MinMaxScaler() # scaler for the labels |
| | | data=mm_x.fit_transform(data) # scale features and labels to [0,1] |
| | | label=mm_y.fit_transform(label) |
| | | return data,label,mm_y |
| | | |
| | | |
| | | def split_windows(data,seq_len,output_size): |
| | | x=[] |
| | | y=[] |
| | | for i in range(len(data)-seq_len-1-output_size): #leave room for the window and the forecast horizon |
| | | _x=data[i:(i+seq_len),:] |
| | | _y=data[(i+seq_len):(i+seq_len+output_size),2:] #NOTE: columns 2+ are the label series |
| | | |
| | | x.append(_x) |
| | | y.append(_y) |
| | | print('split_windows_i:',i) |
| | | print(_x.shape,_y.shape) |
| | | x,y=np.array(x),np.array(y) |
| | | print('x.shape,y.shape=\n',x.shape,y.shape) |
| | | return x,y |
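| | | |
| | | # Shape sketch (illustrative numbers, not from the original script): with |
| | | # 1000 rows of 8 features, seq_len=60 and output_size=15 the loop yields |
| | | # 1000-60-1-15 = 924 windows; columns 2..7 become the 6 label series. |
| | | def _demo_split_windows_shapes(): |
| | |     demo = np.random.rand(1000, 8) |
| | |     x, y = split_windows(demo, 60, 15) |
| | |     assert x.shape == (924, 60, 8) and y.shape == (924, 15, 6) |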
| | | |
| | | |
| | | |
| | | |
| | | def split_data(x,y,split_ratio): |
| | | |
| | | train_size=int(len(y)*split_ratio) |
| | | test_size=len(y)-train_size |
| | | |
| | | x_data=Variable(torch.Tensor(np.array(x))) |
| | | y_data=Variable(torch.Tensor(np.array(y))) |
| | | |
| | | x_train=Variable(torch.Tensor(np.array(x[0:train_size]))) |
| | | y_train=Variable(torch.Tensor(np.array(y[0:train_size]))) |
| | | y_test=Variable(torch.Tensor(np.array(y[train_size:len(y)]))) |
| | | x_test=Variable(torch.Tensor(np.array(x[train_size:len(x)]))) |
| | | |
| | | print('x_data.shape,y_data.shape,x_train.shape,y_train.shape,x_test.shape,y_test.shape:\n{}{}{}{}{}{}' |
| | | .format(x_data.shape,y_data.shape,x_train.shape,y_train.shape,x_test.shape,y_test.shape)) |
| | | |
| | | return x_data,y_data,x_train,y_train,x_test,y_test |
| | | |
| | | |
| | | def nash_sutcliffe_efficiency(y_true, y_pred): |
| | | """ |
| | | 计ç®Nash-Sutcliffe Efficiencyææ ã |
| | | åæ°: |
| | | y_true : array-like, çå®è§æµå¼ |
| | | y_pred : array-like, 颿µå¼ |
| | | è¿å: |
| | | nse : float, Nash-Sutcliffe Efficiency |
| | | """ |
| | | return 1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2) |
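| | | |
| | | # Quick sanity check of the metric (illustrative values): a perfect prediction |
| | | # gives NSE = 1, while predicting the mean of the observations gives NSE = 0. |
| | | def _demo_nse(): |
| | |     obs = np.array([1.0, 2.0, 3.0, 4.0]) |
| | |     assert nash_sutcliffe_efficiency(obs, obs) == 1.0 |
| | |     assert nash_sutcliffe_efficiency(obs, np.full(4, obs.mean())) == 0.0 |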
| | | |
| | | |
| | | |
| | | # 1. load the time-series data |
| | | |
| | | df= pd.read_csv(config.data_path,parse_dates=["date"],index_col=[0]) |
| | | #parse_dates turns the date column into the index |
| | | |
| | | print(df.shape) |
| | | |
| | | data = df.iloc[:,0:8] # feature columns |
| | | label = df.iloc[:,7] |
| | | data = data.values |
| | | label = label.values.reshape(-1,1) |
| | | |
| | | # torch.manual_seed(7) # fix the RNG seed for reproducibility |
| | | |
| | | data,label,mm_y=normalization(data,label) |
| | | |
| | | dataX,dataY = split_windows(data,config.timestep,config.output_size) |
| | | |
| | | x_data,y_data,x_train,y_train,x_test,y_test = split_data(dataX,dataY,0.8) |
| | | |
| | | # 5. build the training datasets |
| | | train_data = TensorDataset(x_train,y_train) |
| | | test_data = TensorDataset(x_test,y_test) |
| | | |
| | | |
| | | # 6. wrap the datasets in DataLoaders (shuffle disabled for time series) |
| | | train_loader = torch.utils.data.DataLoader(train_data, |
| | | config.batch_size, |
| | | False) |
| | | |
| | | test_loader = torch.utils.data.DataLoader(test_data, |
| | | config.batch_size, |
| | | False) |
| | | |
| | | |
| | | # 7. define the GRU network |
| | | class GRU(nn.Module): |
| | | def __init__(self, feature_size, hidden_size, num_layers, output_size): |
| | | super(GRU, self).__init__() |
| | | self.hidden_size = hidden_size # hidden layer size |
| | | self.output_size = output_size |
| | | self.num_layers = num_layers # number of GRU layers |
| | | # feature_size: number of input features per time step |
| | | self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8,batch_first=True) |
| | | self.fc1 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc2 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc3 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc4 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc5 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc6 = nn.Linear(self.hidden_size, self.output_size) |
| | | |
| | | def forward(self, x, hidden=None): |
| | | batch_size = x.size()[0] # batch size |
| | | |
| | | # initialize the hidden state with zeros |
| | | if hidden is None: |
| | | h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float() |
| | | else: |
| | | h_0 = hidden |
| | | |
| | | # run the GRU |
| | | output, h_0 = self.gru(x, h_0) |
| | | |
| | | # dimensions of the GRU output |
| | | batch_size, timestep, hidden_size = output.shape |
| | | |
| | | # output could be flattened to (batch_size*timestep, hidden_size); not needed here |
| | | # output = output.reshape(-1, hidden_size) |
| | | |
| | | pred1, pred2, pred3 = self.fc1(output), self.fc2(output), self.fc3(output) |
| | | pred1, pred2, pred3 = pred1[:, -1, :], pred2[:, -1, :], pred3[:, -1, :] |
| | | pred4, pred5, pred6 = self.fc4(output), self.fc5(output), self.fc6(output) |
| | | pred4, pred5, pred6 = pred4[:, -1, :], pred5[:, -1, :], pred6[:, -1, :] |
| | | pred = torch.stack([pred1, pred2, pred3,pred4, pred5, pred6], dim=2) |
| | | |
| | | return pred |
| | | |
| | | |
| | | model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size) # instantiate the GRU network |
| | | print(model) |
| | | loss_function = nn.MSELoss() # loss function |
| | | optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) # optimizer |
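| | | |
| | | # Sanity-check sketch (not in the original script): push one random batch |
| | | # through the network; each of the six heads forecasts output_size steps for |
| | | # one monitoring well, so the stacked output is (batch, output_size, 6). |
| | | def _demo_gru_shapes(): |
| | |     xb = torch.randn(4, config.timestep, config.feature_size) |
| | |     with torch.no_grad(): |
| | |         out = model(xb) |
| | |     assert out.shape == (4, config.output_size, 6) |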
| | | |
| | | # 8. train the model |
| | | for epoch in range(config.epochs): |
| | | model.train() |
| | | running_loss = 0 |
| | | train_bar = tqdm(train_loader) # progress bar |
| | | for data in train_bar: |
| | | x_train, y_train = data # unpack X and Y from the loader |
| | | optimizer.zero_grad() |
| | | y_train_pred = model(x_train) |
| | | |
| | | loss = loss_function(y_train_pred, y_train) |
| | | loss.backward() |
| | | optimizer.step() |
| | | |
| | | running_loss += loss.item() |
| | | train_bar.desc = "train epoch[{}/{}] loss:{:.6f}".format(epoch + 1, |
| | | config.epochs, |
| | | loss) |
| | | |
| | | print('Finished Training') |
| | | |
| | | model_name = 'GRU_YQS' |
| | | torch.save(model.state_dict(), 'C://Users//ZMK//Desktop//GRU/{}.pth'.format(model_name)) |
| | | |
| | | |
| | | model.eval() |
| | | |
| | | #Validation: run the original data through the trained model once more |
| | | |
| | | df= pd.read_csv(config.data_path,parse_dates=["date"],index_col=[0]) |
| | | #parse_dates turns the date column into the index |
| | | |
| | | data = df.iloc[:,0:8] # feature columns |
| | | label = df.iloc[:,7] |
| | | data = data.values |
| | | label = label.values.reshape(-1,1) |
| | | |
| | | |
| | | data,label,mm_y=normalization(data,label) |
| | | |
| | | dataX,dataY = split_windows(data,config.timestep,config.output_size) |
| | | |
| | | x_data,y_data,x_train,y_train,x_test,y_test = split_data(dataX,dataY,0.8) |
| | | |
| | | test_pre = model(x_data) |
| | | |
| | | with pd.ExcelWriter("C:\\Users\\ZMK\\Desktop\\GRU\\GRU-YQS.xlsx", engine='openpyxl') as writer: |
| | | |
| | | for i in range(6): |
| | | test_pre_data = test_pre[:,0,i].data.numpy().reshape(-1,1) |
| | | y_test_pre = y_data[:,0,i].data.numpy().reshape(-1,1) |
| | | |
| | | print(test_pre_data.shape) |
| | | |
| | | test_pre_data_inv = mm_y.inverse_transform(test_pre_data) |
| | | |
| | | # print(test_pre_data_inv.shape) |
| | | y_test_inv =mm_y.inverse_transform(y_test_pre) |
| | | |
| | | |
| | | plt.figure(figsize=(10,5)) |
| | | plt.plot(y_test_inv) |
| | | plt.plot(test_pre_data_inv) |
| | | plt.legend(('real', 'predict'),fontsize='15') |
| | | plt.show() |
| | | |
| | | print('MAE/RMSE/NSE') |
| | | print(mean_absolute_error(y_test_inv, test_pre_data_inv)) |
| | | print(np.sqrt(mean_squared_error(y_test_inv, test_pre_data_inv))) |
| | | print(nash_sutcliffe_efficiency(y_test_inv, test_pre_data_inv)) |
| | | |
| | | y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}']) |
| | | test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'test Node {i+1}']) |
| | | |
| | | # save each node to its own worksheet |
| | | y_test_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False) |
| | | test_pre_data_inv.to_excel(writer, sheet_name=f'test Node {i+1}', index=False) |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Sun May 26 02:15:11 2024 |
| | | |
| | | @author: BDYGS |
| | | """ |
| | | |
| | | |
| | | import matplotlib.pyplot as plt |
| | | import numpy as np |
| | | import pandas as pd |
| | | import torch |
| | | import torch.nn as nn |
| | | from torch.autograd import Variable |
| | | # import tushare as ts |
| | | from sklearn.preprocessing import StandardScaler, MinMaxScaler |
| | | from sklearn.metrics import mean_squared_error |
| | | from sklearn.metrics import mean_absolute_error |
| | | from torch.utils.data import TensorDataset |
| | | from tqdm import tqdm |
| | | |
| | | |
| | | |
| | | |
| | | class Config(): |
| | | # data_path = "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井.csv" |
| | | timestep = 60 # look-back window length (time steps) |
| | | batch_size = 30 # batch size |
| | | feature_size = 8 # number of input features per time step |
| | | hidden_size = 256 # hidden layer size |
| | | output_size = 15 # forecast horizon: 15 steps ahead |
| | | num_layers = 2 # number of GRU layers |
| | | epochs = 100 # training epochs |
| | | best_loss = 0 # best loss recorded so far |
| | | learning_rate = 0.0003 # learning rate |
| | | # model_name = 'GRU_ZMK' # model name |
| | | # save_path = 'C://Users//ZMK//Desktop//GRU//{}.pth'.format(model_name) # path for saving the best model |
| | | |
| | | config = Config() |
| | | |
| | | |
| | | def normalization(data,label): |
| | | mm_x=MinMaxScaler() # scaler for the input features |
| | | mm_y=MinMaxScaler() # scaler for the labels |
| | | data=mm_x.fit_transform(data) # scale features and labels to [0,1] |
| | | label=mm_y.fit_transform(label) |
| | | return data,label,mm_y |
| | | |
| | | |
| | | def split_windows(data,seq_len,output_size): |
| | | x=[] |
| | | y=[] |
| | | for i in range(len(data)-seq_len-1-output_size): #leave room for the window and the forecast horizon |
| | | _x=data[i:(i+seq_len),:] |
| | | _y=data[(i+seq_len):(i+seq_len+output_size),2:] #NOTE: columns 2+ are the label series |
| | | |
| | | x.append(_x) |
| | | y.append(_y) |
| | | print('split_windows_i:',i) |
| | | print(_x.shape,_y.shape) |
| | | x,y=np.array(x),np.array(y) |
| | | print('x.shape,y.shape=\n',x.shape,y.shape) |
| | | return x,y |
| | | |
| | | def split_windows_long(data,seq_len,output_size): |
| | | #non-overlapping windows: the start index advances by output_size |
| | | print(len(data)) |
| | | x=[] |
| | | y=[] |
| | | for i in range(int(len(data)/output_size)-4): |
| | | a = i*output_size |
| | | # print(a) |
| | | _x=data[a:a+seq_len,:] |
| | | # print(_x.shape) |
| | | _y=data[a+seq_len:a+seq_len+output_size,2:] #NOTE: columns 2+ are the label series |
| | | # print(_y.shape) |
| | | x.append(_x) |
| | | y.append(_y) |
| | | print('split_windows_i:',i) |
| | | # print(_x,_y) |
| | | x,y=np.array(x),np.array(y) |
| | | print('x.shape,y.shape=\n',x.shape,y.shape) |
| | | return x,y |
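| | | |
| | | # Shape sketch (illustrative numbers): unlike split_windows, the start index |
| | | # here steps by output_size, so windows do not overlap. With 150 rows, |
| | | # seq_len=60 and output_size=15 it yields int(150/15)-4 = 6 windows. |
| | | def _demo_split_windows_long_shapes(): |
| | |     demo = np.random.rand(150, 8) |
| | |     x, y = split_windows_long(demo, 60, 15) |
| | |     assert x.shape == (6, 60, 8) and y.shape == (6, 15, 6) |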
| | | |
| | | |
| | | def nash_sutcliffe_efficiency(y_true, y_pred): |
| | | """ |
| | | 计ç®Nash-Sutcliffe Efficiencyææ ã |
| | | åæ°: |
| | | y_true : array-like, çå®è§æµå¼ |
| | | y_pred : array-like, 颿µå¼ |
| | | è¿å: |
| | | nse : float, Nash-Sutcliffe Efficiency |
| | | """ |
| | | return 1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2) |
| | | |
| | | |
| | | |
| | | # define the GRU network |
| | | class GRU(nn.Module): |
| | | def __init__(self, feature_size, hidden_size, num_layers, output_size): |
| | | super(GRU, self).__init__() |
| | | self.hidden_size = hidden_size # hidden layer size |
| | | self.output_size = output_size |
| | | self.num_layers = num_layers # number of GRU layers |
| | | # feature_size: number of input features per time step |
| | | self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8,batch_first=True) |
| | | self.fc1 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc2 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc3 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc4 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc5 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc6 = nn.Linear(self.hidden_size, self.output_size) |
| | | |
| | | def forward(self, x, hidden=None): |
| | | batch_size = x.size()[0] # batch size |
| | | |
| | | # initialize the hidden state with zeros |
| | | if hidden is None: |
| | | h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float() |
| | | else: |
| | | h_0 = hidden |
| | | |
| | | # run the GRU |
| | | output, h_0 = self.gru(x, h_0) |
| | | |
| | | # dimensions of the GRU output |
| | | batch_size, timestep, hidden_size = output.shape |
| | | |
| | | # output could be flattened to (batch_size*timestep, hidden_size); not needed here |
| | | # output = output.reshape(-1, hidden_size) |
| | | |
| | | pred1, pred2, pred3 = self.fc1(output), self.fc2(output), self.fc3(output) |
| | | pred1, pred2, pred3 = pred1[:, -1, :], pred2[:, -1, :], pred3[:, -1, :] |
| | | pred4, pred5, pred6 = self.fc4(output), self.fc5(output), self.fc6(output) |
| | | pred4, pred5, pred6 = pred4[:, -1, :], pred5[:, -1, :], pred6[:, -1, :] |
| | | pred = torch.stack([pred1, pred2, pred3,pred4, pred5, pred6], dim=2) |
| | | |
| | | return pred |
| | | |
| | | model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size) # instantiate the GRU network |
| | | print(model) |
| | | loss_function = nn.MSELoss() # loss function |
| | | optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) # optimizer |
| | | |
| | | # load the pre-trained Yongding River model weights |
| | | model.load_state_dict(torch.load('C://Users//ZMK//Desktop//GRU//GRU_YDH.pth')) |
| | | |
| | | |
| | | model.eval() |
| | | #prediction on new data |
| | | |
| | | df_pre = pd.read_csv("C:\\Users\\ZMK\\Desktop\\GRU\\永定河井-pre.csv",parse_dates=["date"],index_col=[0]) |
| | | |
| | | print(df_pre.shape) |
| | | |
| | | data_pre = df_pre.iloc[:,0:8] |
| | | |
| | | label_pre = df_pre.iloc[:,7] #the label is only used to fit the inverse scaler; it is not an input |
| | | |
| | | |
| | | data_pre = data_pre.values |
| | | |
| | | label_pre = label_pre.values.reshape(-1,1) |
| | | |
| | | data_pre,label_pre,mm_y_pre = normalization(data_pre,label_pre) |
| | | dataX_pre,dataY_pre = split_windows_long(data_pre,config.timestep,config.output_size) |
| | | |
| | | dataX_pre = Variable(torch.Tensor(np.array(dataX_pre))) |
| | | dataY_pre = Variable(torch.Tensor(np.array(dataY_pre))) |
| | | |
| | | print(dataY_pre.shape) |
| | | |
| | | test_pre = model(dataX_pre) |
| | | |
| | | print(test_pre.shape) |
| | | |
| | | |
| | | |
| | | |
| | | with pd.ExcelWriter("C:\\Users\\ZMK\\Desktop\\GRU\\GRU-pre-ydh.xlsx", engine='openpyxl') as writer: |
| | | |
| | | for i in range(6): |
| | | test_pre_data = test_pre[:,:,i].data.numpy().reshape(-1,1) |
| | | y_test_pre = dataY_pre[:,:,i].data.numpy().reshape(-1,1) |
| | | |
| | | test_pre_data_inv = mm_y_pre.inverse_transform(test_pre_data) |
| | | y_test_inv =mm_y_pre.inverse_transform(y_test_pre) |
| | | |
| | | |
| | | # plt.figure(figsize=(10,5)) |
| | | # plt.plot(y_test_inv) |
| | | # plt.plot(test_pre_data_inv) |
| | | # plt.legend(('real', 'predict'),fontsize='15') |
| | | # plt.show() |
| | | |
| | | print('MAE/RMSE/NSE') |
| | | print(mean_absolute_error(y_test_inv, test_pre_data_inv)) |
| | | print(np.sqrt(mean_squared_error(y_test_inv, test_pre_data_inv))) |
| | | print(nash_sutcliffe_efficiency(y_test_inv, test_pre_data_inv)) |
| | | |
| | | y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}']) |
| | | test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'pre Node {i+1}']) |
| | | |
| | | # save each node to its own worksheet |
| | | y_test_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False) |
| | | test_pre_data_inv.to_excel(writer, sheet_name=f'pre Node {i+1}', index=False) |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Sun May 26 02:15:11 2024 |
| | | |
| | | @author: BDYGS |
| | | """ |
| | | |
| | | |
| | | import matplotlib.pyplot as plt |
| | | import numpy as np |
| | | import pandas as pd |
| | | import torch |
| | | import torch.nn as nn |
| | | from torch.autograd import Variable |
| | | # import tushare as ts |
| | | from sklearn.preprocessing import StandardScaler, MinMaxScaler |
| | | from sklearn.metrics import mean_squared_error |
| | | from sklearn.metrics import mean_absolute_error |
| | | from torch.utils.data import TensorDataset |
| | | from tqdm import tqdm |
| | | from datetime import datetime |
| | | |
| | | import DataTask |
| | | |
| | | |
| | | path_dir_left ="C://Users//ZMK//Desktop//GRU//" |
| | | |
| | | path_dir_right ="C:\\Users\\ZMK\\Desktop\\GRU\\" |
| | | |
| | | |
| | | |
| | | class Config(): |
| | | # data_path = "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井.csv" |
| | | timestep = 60 # look-back window length (time steps) |
| | | batch_size = 30 # batch size |
| | | feature_size = 8 # number of input features per time step |
| | | hidden_size = 256 # hidden layer size |
| | | output_size = 15 # forecast horizon: 15 steps ahead |
| | | num_layers = 2 # number of GRU layers |
| | | epochs = 100 # training epochs |
| | | best_loss = 0 # best loss recorded so far |
| | | learning_rate = 0.0003 # learning rate |
| | | # model_name = 'GRU_ZMK' # model name |
| | | # save_path = 'C://Users//ZMK//Desktop//GRU//{}.pth'.format(model_name) # path for saving the best model |
| | | |
| | | config = Config() |
| | | |
| | | |
| | | def normalization(data,label): |
| | | mm_x=MinMaxScaler() # scaler for the input features |
| | | mm_y=MinMaxScaler() # scaler for the labels |
| | | data=mm_x.fit_transform(data) # scale features and labels to [0,1] |
| | | label=mm_y.fit_transform(label) |
| | | return data,label,mm_y |
| | | |
| | | |
| | | def split_windows(data,seq_len,output_size): |
| | | x=[] |
| | | y=[] |
| | | for i in range(len(data)-seq_len-1-output_size): #leave room for the window and the forecast horizon |
| | | _x=data[i:(i+seq_len),:] |
| | | _y=data[(i+seq_len):(i+seq_len+output_size),2:] #NOTE: columns 2+ are the label series |
| | | |
| | | x.append(_x) |
| | | y.append(_y) |
| | | print('split_windows_i:',i) |
| | | print(_x.shape,_y.shape) |
| | | x,y=np.array(x),np.array(y) |
| | | print('x.shape,y.shape=\n',x.shape,y.shape) |
| | | return x,y |
| | | |
| | | def split_windows_long(data,seq_len,output_size): |
| | | #non-overlapping windows: the start index advances by output_size |
| | | print(len(data)) |
| | | x=[] |
| | | y=[] |
| | | for i in range(int(len(data)/output_size)-4): |
| | | a = i*output_size |
| | | # print(a) |
| | | _x=data[a:a+seq_len,:] |
| | | # print(_x.shape) |
| | | _y=data[a+seq_len:a+seq_len+output_size,2:] #NOTE: columns 2+ are the label series |
| | | # print(_y.shape) |
| | | x.append(_x) |
| | | y.append(_y) |
| | | print('split_windows_i:',i) |
| | | # print(_x,_y) |
| | | x,y=np.array(x),np.array(y) |
| | | print('x.shape,y.shape=\n',x.shape,y.shape) |
| | | return x,y |
| | | |
| | | |
| | | def nash_sutcliffe_efficiency(y_true, y_pred): |
| | | """ |
| | | 计ç®Nash-Sutcliffe Efficiencyææ ã |
| | | åæ°: |
| | | y_true : array-like, çå®è§æµå¼ |
| | | y_pred : array-like, 颿µå¼ |
| | | è¿å: |
| | | nse : float, Nash-Sutcliffe Efficiency |
| | | """ |
| | | return 1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2) |
| | | |
| | | |
| | | |
| | | # define the GRU network |
| | | class GRU(nn.Module): |
| | | def __init__(self, feature_size, hidden_size, num_layers, output_size): |
| | | super(GRU, self).__init__() |
| | | self.hidden_size = hidden_size # hidden layer size |
| | | self.output_size = output_size |
| | | self.num_layers = num_layers # number of GRU layers |
| | | # feature_size: number of input features per time step |
| | | self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8,batch_first=True) |
| | | self.fc1 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc2 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc3 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc4 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc5 = nn.Linear(self.hidden_size, self.output_size) |
| | | self.fc6 = nn.Linear(self.hidden_size, self.output_size) |
| | | |
| | | def forward(self, x, hidden=None): |
| | | batch_size = x.size()[0] # batch size |
| | | |
| | | # initialize the hidden state with zeros |
| | | if hidden is None: |
| | | h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float() |
| | | else: |
| | | h_0 = hidden |
| | | |
| | | # run the GRU |
| | | output, h_0 = self.gru(x, h_0) |
| | | |
| | | # dimensions of the GRU output |
| | | batch_size, timestep, hidden_size = output.shape |
| | | |
| | | # output could be flattened to (batch_size*timestep, hidden_size); not needed here |
| | | # output = output.reshape(-1, hidden_size) |
| | | |
| | | pred1, pred2, pred3 = self.fc1(output), self.fc2(output), self.fc3(output) |
| | | pred1, pred2, pred3 = pred1[:, -1, :], pred2[:, -1, :], pred3[:, -1, :] |
| | | pred4, pred5, pred6 = self.fc4(output), self.fc5(output), self.fc6(output) |
| | | pred4, pred5, pred6 = pred4[:, -1, :], pred5[:, -1, :], pred6[:, -1, :] |
| | | pred = torch.stack([pred1, pred2, pred3,pred4, pred5, pred6], dim=2) |
| | | |
| | | return pred |
| | | |
| | | |
| | | |
| | | #Choose the forecast model by well number |
| | | def getModelName(well_num): |
| | | if well_num in DataTask.arr1: |
| | | |
| | | return 'GRU_YDH.pth' |
| | | |
| | | else : |
| | | return 'GRU_YQS.pth' |
| | | |
| | | |
| | | |
| | | #Write the fetched data to CSV |
| | | def write_csv(model_name , np_result,csv_path): |
| | | |
| | | df ="" |
| | | if model_name =='GRU_YDH.pth': |
| | | df = pd.DataFrame({"date":np_result[:,0], "Myrainfall":np_result[:,1], |
| | | "flowrate":np_result[:,2], "LQWB":np_result[:,3], |
| | | "ZSSC":np_result[:,4], "WTY":np_result[:,5], |
| | | "LSH":np_result[:,6], "HZZ":np_result[:,7],"GC":np_result[:,8] |
| | | }) |
| | | |
| | | else: |
| | | df = pd.DataFrame({"date":np_result[:,0], "HDrainfall":np_result[:,1], |
| | | "flowrate":np_result[:,2], "SXC":np_result[:,3], |
| | | "XM1":np_result[:,4], "XM2":np_result[:,5], |
| | | "SYSC":np_result[:,6], "SJYY":np_result[:,7],"BW":np_result[:,8] |
| | | }) |
| | | |
| | | df.to_csv(csv_path, index=False) |
| | | |
| | | |
| | | #Run the forecast model |
| | | def runPredictModel(well_num): |
| | | |
| | | data ="" |
| | | csv_path = "" |
| | | excel_path = "" |
| | | #forecast model file |
| | | model_name = getModelName(well_num) |
| | | |
| | | #Yongding River model |
| | | if model_name == 'GRU_YDH.pth': |
| | | |
| | | csv_path = path_dir_right + "永定河井-pre.csv" |
| | | excel_path = path_dir_right + "永定河井-预测结果.xlsx" |
| | | |
| | | data = DataTask.get_ydh15_real_data() |
| | | else: |
| | | |
| | | csv_path = path_dir_right + "玉泉山井-pre.csv" |
| | | excel_path = path_dir_right + "玉泉山井-预测结果.xlsx" |
| | | |
| | | data = DataTask.getyqs15_real_data() |
| | | |
| | | |
| | | #write the data to CSV |
| | | write_csv(model_name,data,csv_path) |
| | | |
| | | model_path = path_dir_left + model_name |
| | | |
| | | model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size) # instantiate the GRU network |
| | | |
| | | loss_function = nn.MSELoss() # loss function |
| | | optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) # optimizer |
| | | |
| | | model.load_state_dict(torch.load(model_path)) |
| | | model.eval() |
| | | |
| | | |
| | | #path of the CSV data file, e.g. |
| | | # "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井-pre.csv" |
| | | df_pre = pd.read_csv(csv_path , parse_dates=["date"],index_col=[0]) |
| | | |
| | | |
| | | data_pre = df_pre.iloc[:,0:8] |
| | | |
| | | label_pre = df_pre.iloc[:,7] #the label is only used to fit the inverse scaler; it is not an input |
| | | |
| | | data_pre = data_pre.values |
| | | |
| | | label_pre = label_pre.values.reshape(-1,1) |
| | | |
| | | data_pre,label_pre,mm_y_pre = normalization(data_pre,label_pre) |
| | | dataX_pre,dataY_pre = split_windows_long(data_pre,config.timestep,config.output_size) |
| | | |
| | | dataX_pre = Variable(torch.Tensor(np.array(dataX_pre))) |
| | | dataY_pre = Variable(torch.Tensor(np.array(dataY_pre))) |
| | | |
| | | test_pre = model(dataX_pre) |
| | | |
| | | |
| | | with pd.ExcelWriter( excel_path , engine='openpyxl') as writer: |
| | | |
| | | for i in range(6): |
| | | test_pre_data = test_pre[:,:,i].data.numpy().reshape(-1,1) |
| | | y_test_pre = dataY_pre[:,:,i].data.numpy().reshape(-1,1) |
| | | |
| | | test_pre_data_inv = mm_y_pre.inverse_transform(test_pre_data) |
| | | y_test_inv =mm_y_pre.inverse_transform(y_test_pre) |
| | | |
| | | |
| | | |
| | | y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}']) |
| | | test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'pre Node {i+1}']) |
| | | |
| | | # save each node to its own worksheet |
| | | y_test_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False) |
| | | test_pre_data_inv.to_excel(writer, sheet_name=f'pre Node {i+1}', index=False) |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | |
| | | from flask import Flask, jsonify, request |
| | | from flask_cors import CORS |
| | | import sys |
| | | import numpy as np |
| | | import pandas as pd |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import time |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import shutil |
| | | import json |
| | | import Base as base |
| | | import CalHead |
| | | import Predict |
| | | import ModelPeriod |
| | | import AchiveReport as achiveReport |
| | | import BigData |
| | | import OpenExcel |
| | | import DataTransf |
| | | import DataTask |
| | | |
| | | # Flask takes __name__, which points at the package the application lives in |
| | | app = Flask(__name__) |
| | |
| | | #number of river cells |
| | | riverCellSize = 109 |
| | | |
| | | iboundGroupSize = 10 |
| | | iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240], |
| | | 6:[241,282],7:[283,354],8:[355,393],9:[394,436],10:[437,453]} |
| | | |
| | | iboundGroup3Size = 5 |
| | | iboundGroup3={1:[241,282],2:[283,354],3:[355,393],4:[394,436],5:[437,453]} |
| | | iboundName =["西侧æµå
¥","å鍿µå
¥","ä¸å鍿µå
¥","ä¸é¨æµåº","å鍿µåº", |
| | | "å±±åºæµå
¥","æ°¸å®æ²³æè£æµå
¥","é»åºé«ä¸½æè£æµå
¥","å
«å®å±±æè£æµå
¥","æææ¹æè£å
¥æµ"] |
| | | |
| | | |
| | | riverGroupSize = 4 |
| | | riverGroup={1:[454,479],2:[480,505],3:[506,527],4:[528,562]} |
| | |
| | | def getModel(model_name): |
| | | model_ws="" |
| | | if not model_name: |
| | | model_ws = base.not_allowed_model |
| | | else: |
| | | model_ws = base.model_dir + model_name |
| | | |
| | |
| | | @app.route('/baseparam/', methods=['GET']) |
| | | def baseparam(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | ml= getModel(model_name) |
| | | nrclp = ml.get_nrow_ncol_nlay_nper() |
| | | dict = {"Row": nrclp[0], "Column": nrclp[1], |
| | |
| | | |
| | | months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | dict["months"]=months |
| | | print(jsondata) |
| | | if "initHeader" in jsondata: |
| | | dict["initHead"] = jsondata["initHeader"] |
| | | else: |
| | | dict["initHead"] = "" |
| | | return jsonify(dict) |
| | | |
| | | #Save the initial head |
| | | @app.route('/saveInitHead', methods=['POST']) |
| | | def saveInitHead(): |
| | | jsondata = request.get_json() |
| | | model_name = str(jsondata['model_name']) |
| | | initHeader = str(jsondata['initHead']) |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return jsonify("æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼") |
| | | |
| | | jsondata= CalHead.get_model_json(model_name) |
| | | jsondata["initHeader"] = initHeader |
| | | |
| | | jpath = base.model_dir + model_name +"\\prediction.json" |
| | | with open(jpath, "w",encoding='utf-8') as outfile: |
| | | json.dump(jsondata, outfile,ensure_ascii=False) |
| | | |
| | | return jsonify("ä¿ååå§æ°´å¤´æåï¼") |
| | | |
| | | #Read the WEL file; parameter: Period |
| | | @app.route('/welList/', methods=['GET']) |
| | |
| | | |
| | | result_len = len(result) |
| | | |
| | | ibound_segment={} |
| | | |
| | | if layerparam == '1': |
| | | #boundary |
| | | |
| | | ibound_segment={"1":[0,85],"2":[86,110],"3":[111,141],"4":[142,169],"5":[170,239]} |
| | | |
| | | for i in range(0, 240): |
| | | iboundarray.append(result[i]) |
| | | #river |
| | | for i in range(453, 562): |
| | | riverarray.append(result[i]) |
| | | |
| | |
| | | welarray.append(result[i]) |
| | | |
| | | elif layerparam == '3': |
| | | |
| | | ibound_segment={"6":[0,41],"7":[42,113],"8":[114,152],"9":[153,195],"10":[196,212]} |
| | | |
| | | for i in range(240, 453): |
| | | iboundarray.append(result[i]) |
| | | |
| | | ibounddict = {"name": "ibound", "data": iboundarray,"segment":ibound_segment} |
| | | |
| | | ibounddict = {"name": "ibound", "data": iboundarray} |
| | | riverdict = {"name": "river", "data": riverarray} |
| | | riversgement={"1":[0,25],"2":[26,51],"3":[52,73],"4":[74,108]} |
| | | riverdict = {"name": "river", "data": riverarray,"segment":riversgement} |
| | | |
| | | |
| | | weldict = {"name": "wel", "data": welarray} |
| | | |
| | |
| | | data.append(ibounddict) |
| | | data.append(weldict) |
| | | return jsonify(data) |
| | | |
| | | |
| | | #Read data for a single well |
| | | @app.route('/wel/', methods=['GET']) |
| | |
| | | # model_name = request.args.get('model_name') |
| | | model_name = str(json['model_name']) |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return jsonify("æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼") |
| | | |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,dtype=ml.wel.dtype, |
| | | options=ml.wel.options, |
| | | stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | return jsonify("æ°æ®æ´æ°å®æ¯ï¼") |
| | | |
| | | |
| | | #Read a spreadsheet and generate the WEL file |
| | | @app.route('/cellFileInput', methods=['POST']) |
| | | def cellFileInput(): |
| | | |
| | | path ='C:\\Users\\ZMK\\Desktop\\待分配\\cell文件.xlsx' |
| | | |
| | | data = get_cell_data(path) |
| | | |
| | | lrcq= get_cell_struct(data["excel1"],data["excel2"],data["excel3"]) |
| | | |
| | | model_name = request.args.get('model_name') |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | flopy.modflow.ModflowWel(ml,stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | return jsonify("sucess") |
| | | |
| | | |
| | | def get_cell_struct(excel1,excel2,excel3): |
| | | lrcq={} |
| | | |
| | | #number of stress periods |
| | | period = 7 |
| | | start_row_index = 1 |
| | | |
| | | #boundary data sheet |
| | | for col in range (0,period): |
| | | array =[] |
| | | for row in range(start_row_index, len(excel1)): |
| | | |
| | | arr = [excel1[row][2]-1,excel1[row][3]-1,excel1[row][4]-1,excel1[row][6+col]] |
| | | array.append(arr) |
| | | lrcq[col]= array |
| | | |
| | | #river data sheet |
| | | for col in range (0,period): |
| | | array =[] |
| | | for row in range(start_row_index, len(excel2)): |
| | | |
| | | arr = [excel2[row][2]-1,excel2[row][3]-1,excel2[row][4]-1,excel2[row][6+col]] |
| | | array.append(arr) |
| | | |
| | | lrcq[col].extend(array) |
| | | |
| | | #pumping data sheet |
| | | for col in range (0,period): |
| | | |
| | | array =[] |
| | | for row in range(start_row_index, len(excel3)): |
| | | |
| | | arr = [excel3[row][1]-1,excel3[row][2]-1,excel3[row][3]-1,excel3[row][8+col]] |
| | | array.append(arr) |
| | | |
| | | lrcq[col].extend(array) |
| | | |
| | | return lrcq |
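| | | |
| | | # The lrcq dict built above follows flopy's WEL stress_period_data layout: |
| | | # {period: [[layer, row, col, Q], ...]} with 0-based indices (hence the -1 |
| | | # offsets). A minimal hand-built example for two stress periods: |
| | | def _demo_lrcq_layout(): |
| | |     return {0: [[2, 3, 4, -100.0]], 1: [[2, 3, 4, -80.0]]} |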
| | | |
| | | |
| | | |
| | | #Load the cell spreadsheet |
| | | #file_path: path of the Excel file |
| | | def get_cell_data(file_path): |
| | | |
| | | workbook = load_workbook(file_path) |
| | | sheetnames = workbook.sheetnames |
| | | #read first sheet |
| | | sheet1 = workbook[sheetnames[0]] |
| | | sheet2 = workbook[sheetnames[1]] |
| | | sheet3 = workbook[sheetnames[2]] |
| | | |
| | | excel1 =[] |
| | | excel2=[] |
| | | excel3=[] |
| | | # read the whole sheet |
| | | for row in sheet1.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | excel1.append(array) |
| | | |
| | | for row in sheet2.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | excel2.append(array) |
| | | |
| | | for row in sheet3.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | excel3.append(array) |
| | | |
| | | # close the Excel file |
| | | workbook.close() |
| | | data={"excel1":excel1,"excel2":excel2,"excel3":excel3} |
| | | |
| | | return data |
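| | | |
| | | #Hedged usage sketch of how the two helpers above compose; the path is |
| | | #hypothetical. The three sheets hold boundary, river and pumping records, |
| | | #and the result can be fed straight into flopy.modflow.ModflowWel. |
| | | def _example_build_wel_data(path="cells.xlsx"): |
| | |     data = get_cell_data(path) |
| | |     return get_cell_struct(data["excel1"], data["excel2"], data["excel3"]) |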
| | | |
| | | |
| | | |
| | | #list the ibound boundary groups |
| | | @app.route('/iboundList/', methods=['GET']) |
| | | def iboundList(): |
| | | |
| | | return jsonify(iboundName) |
| | | |
| | | |
| | | #grouped boundary data |
| | |
| | | data = json['data'] |
| | | |
| | | model_name = json['model_name'] |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return jsonify("This model is a validation model and cannot be modified!") |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | index = iboundGroup[no] |
| | |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb, |
| | |                          dtype=ml.wel.dtype, |
| | |                          options=ml.wel.options, |
| | |                          stress_period_data=lrcq) |
| | | ml.write_input() |
| | | return jsonify("Data update complete!") |
| | | |
| | |
| | | end_index = index[1] |
| | | model_name = json['model_name'] |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return jsonify("This model is a validation model and cannot be modified!") |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb, |
| | |                          dtype=ml.wel.dtype, |
| | |                          options=ml.wel.options, |
| | |                          stress_period_data=lrcq) |
| | | ml.write_input() |
| | | return jsonify("Data update complete!") |
| | | |
| | |
| | | return jsonify(result) |
| | | |
| | | |
| | | #modify precipitation data |
| | | # @app.route('/precipitationInput', methods=['POST']) |
| | | # def precipitationInput(): |
| | | |
| | | # json = request.get_json() |
| | | # model_name= str(json['model_name']) |
| | | # period = int(json['period']) |
| | | # #json list of per-zone values for this period |
| | | # data = json['data'] |
| | | # dict = {} |
| | | # for i in range(len(data)): |
| | | # q1 = data[i]['Q1'] |
| | | # q2 = data[i]['Q2'] |
| | | # dict[q1] = q2 |
| | | |
| | | # ml= getModel(model_name) |
| | | |
| | | # item = ml.rch.rech.__getitem__(kper=period) |
| | | # array2d = item.get_value() |
| | | |
| | | # count = 0 |
| | | |
| | | # array2d_len = len(array2d) |
| | | |
| | | # for i in range(array2d_len): |
| | | |
| | | # array_len = len(array2d[i]) |
| | | |
| | | # for j in range(array_len): |
| | | |
| | | # va = str(array2d[i][j]) |
| | | # if va in dict: |
| | | # count += 1 |
| | | # array2d[i][j] = float(dict[va]) |
| | | |
| | | # ml.rch.rech.__setitem__(key=period, value=array2d) |
| | | |
| | | # rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech) |
| | | # rch.write_file(check=False) |
| | | # #ml.write_input() |
| | | |
| | | # return jsonify("Precipitation parameters updated!") |
| | | |
| | | |
| | | @app.route('/precipitationInput', methods=['POST']) |
| | | def precipitationInput(): |
| | | |
| | | json = request.get_json() |
| | | model_name= str(json['model_name']) |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return jsonify("This model is a validation model and cannot be modified!") |
| | | period = int(json['period']) |
| | | #json list of per-zone values for this period |
| | | data = json['data'] |
| | |
| | | |
| | | tuples= areas[key] |
| | | zblen= len(tuples) |
| | | values = round(float(dict[key]),8) |
| | | for i in range(zblen): |
| | | x = tuples[i][0] |
| | | y = tuples[i][1] |
| | |
| | | |
| | | ml.rch.rech.__setitem__(key=period, value=array2d) |
| | | |
| | | #rebuild the RCH package once, preserving the original package settings |
| | | rch = flopy.modflow.ModflowRch(ml,nrchop=ml.rch.nrchop, |
| | |                                ipakcb=ml.rch.ipakcb, |
| | |                                rech=ml.rch.rech, |
| | |                                irch =ml.rch.irch) |
| | | rch.write_file(check=False) |
| | | # ml.write_input() |
| | | |
| | | return jsonify("Precipitation parameters updated!") |
| | | |
| | | #import the csv file |
| | | @app.route('/precipitationInputFile', methods=['POST']) |
| | | def precipitationInputFile(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | ml= getModel(model_name) |
| | | save_path = 'C:/Users/ZMK/Desktop/test1/' + "1111.xlsx" |
| | | file = request.files.get('file') |
| | | |
| | | if file: |
| | | file.save(save_path) |
| | | |
| | | #get the parsed excel table data |
| | | stations = get_station_struct(save_path) |
| | | |
| | | #loop over the stress periods |
| | | #perd: the period index |
| | | #array2d: the 2-D recharge array for the period |
| | | for perd in range(0,36): |
| | | period = perd |
| | | item = ml.rch.rech.__getitem__(kper=period) |
| | | array2d = item.get_value() |
| | | |
| | | array2d_len = len(array2d) |
| | | count = 0 |
| | | #store every station's value for this period in a dict keyed by the old cell value |
| | | dict = {} |
| | | for k in range(0,len(stations)): |
| | | row = stations[k]["row"] |
| | | column = stations[k]["column"] |
| | | |
| | | data_old = array2d[row][column] |
| | | data_new = stations[k]["data"][perd] |
| | | dict[data_old]= data_new |
| | | |
| | | |
| | | #set the values for each period |
| | | for i in range(array2d_len): |
| | | |
| | | array_len = len(array2d[i]) |
| | | |
| | | for j in range(array_len): |
| | | |
| | | va = str(array2d[i][j]) |
| | | if va in dict: |
| | | array2d[i][j] = float(dict[va]) |
| | | |
| | | #set array2d back into the corresponding item for this period |
| | | ml.rch.rech.__setitem__(key=period, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech) |
| | | rch.write_file(check=False) |
| | | # ml.write_input() |
| | | return 'File upload succeeded' |
| | | else: |
| | | return 'Upload failed: no file selected' |
| | | |
| | | |
| | | #get the station data and build the data structure |
| | | #file_path: path to the file |
| | | def get_station_struct(file_path): |
| | | |
| | | workbook = load_workbook(file_path) |
| | | sheetnames = workbook.sheetnames   #get_sheet_names() is deprecated in openpyxl |
| | | #read first sheet |
| | | sheet = workbook[sheetnames[0]] |
| | | |
| | | array2d_excel=[] |
| | | # read the whole worksheet row by row |
| | | for row in sheet.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | array2d_excel.append(array) |
| | | # close the Excel file |
| | | workbook.close() |
| | | |
| | | #column index where the data starts |
| | | data_start_index=6 |
| | | #row index where the station info starts |
| | | start_row_index = 1 |
| | | #store the station info |
| | | stations = [] |
| | | for i in range (start_row_index,len(array2d_excel)): |
| | | st={"name":array2d_excel[i][1],"row":array2d_excel[i][4],"column":array2d_excel[i][5]} |
| | | data=[] |
| | | for j in range(data_start_index,len(array2d_excel[i])): |
| | | cell_data = array2d_excel[i][j] |
| | | cell_data= cell_data/100/30*0.15 |
| | | data.append(round(cell_data, 6)) |
| | | st["data"]= data |
| | | stations.append(st) |
| | | |
| | | return stations |
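| | | |
| | | #Note on the conversion above (an interpretation, not documented in the source): |
| | | #cell_data/100/30*0.15 reads like a monthly rainfall total in cm turned into an |
| | | #average daily recharge in m, with 0.15 as the infiltration coefficient. |
| | | #A minimal standalone sketch of the same arithmetic: |
| | | def _example_rain_to_recharge(monthly_cm=60.0, infiltration=0.15): |
| | |     return round(monthly_cm / 100 / 30 * infiltration, 6)  #60 cm/month -> 0.003 m/day |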
| | | |
| | | |
| | | #run the model |
| | |
| | | def runModel(): |
| | | model_name = request.args.get('model_name') |
| | | |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return jsonify("This model is a validation model and cannot be modified!") |
| | | |
| | | dicts= Predict.run_model_predict(model_name) |
| | | if dicts["code"] == 400: |
| | | return dicts["msg"] |
| | | #export the csv file |
| | | CalHead.exportCsV(model_name) |
| | | |
| | | #update the model's 3-D grid configuration |
| | | base.updateModelConfig(model_name) |
| | | |
| | | #create the model's 3-D grid |
| | | filedir = base.model3d_path + model_name |
| | | |
| | | print(filedir) |
| | | if not os.path.exists(filedir): |
| | | os.makedirs(filedir, exist_ok=True) |
| | | base.callModelexe() |
| | | |
| | | #compute water resources and the water balance |
| | | CalHead.run_zonebudget_bal(model_name) |
| | | CalHead.run_zonebudget_res(model_name) |
| | | |
| | | return jsonify(dicts["msg"]) |
| | | |
| | | #generate the model csv file |
| | | @app.route('/runModelCsv/', methods=['GET']) |
| | |
| | | return jsonify(result) |
| | | |
| | | |
| | | |
| | | #initial water level info |
| | | @app.route('/initWater/', methods=['GET']) |
| | | def initWater(): |
| | | |
| | | period = request.args.get('period') |
| | | |
| | | per = int(period) |
| | | |
| | | model_name = request.args.get('model_name') |
| | | |
| | | ml= getModel(model_name) |
| | | item = ml.rch.rech.__getitem__(kper=per) |
| | | value = item.get_value() |
| | | t = np.array(value).tolist() |
| | | return jsonify(t) |
| | | |
| | | #create a new model |
| | | @app.route('/saveModel/', methods=['GET']) |
| | | def saveModel(): |
| | |
| | | modelname = request.args.get('name') |
| | | startTime = request.args.get('startTime') |
| | | endTime = request.args.get('endTime') |
| | | remark = request.args.get('remark') |
| | | file_list = os.listdir(base.model_dir) |
| | | for name in file_list: |
| | | if name == modelname: |
| | | return jsonify("模ååç§°å·²ç»åå¨ï¼ä¸å
许éå¤å建ï¼") |
| | | |
| | | pers = ModelPeriod.get_months_in_range_count(startTime,endTime) |
| | | if pers > 60 : |
| | | return jsonify("Model creation failed: at most 60 consecutive prediction periods are allowed!") |
| | | |
| | | dir = base.model_dir + modelname |
| | | |
| | | if pers==12: |
| | | shutil.copytree(base.predictModel,dir) |
| | | else: |
| | | #not a one-year prediction: a multi-period run with more or fewer than 12 periods |
| | | #clone the 60-period base model first, then update the dis file |
| | | #the wel and rch files do not need to be modified |
| | | shutil.copytree(base.predictModel60,dir) |
| | | Predict.updateDisFile(modelname,pers) |
| | | |
| | | |
| | | jsondata={"model_name":modelname,"start_time":startTime,"end_time":endTime} |
| | | predictionJson = base.model_dir + modelname +"\\prediction.json" |
| | | with open(predictionJson, "w",encoding='utf-8') as outfile: |
| | | json.dump(jsondata, outfile,ensure_ascii=False) |
| | | |
| | | |
| | | CalHead.addModelJson(modelname, startTime, endTime, remark) |
| | | return jsonify("åå»ºæ°æ¨¡å宿¯ï¼") |
| | | |
| | | |
| | | #model list |
| | | @app.route('/ModelList/', methods=['GET']) |
| | | def ModelList(): |
| | | |
| | | file_list = os.listdir(base.model_dir) |
| | | return jsonify(file_list) |
| | | |
| | | #model list 2 |
| | | @app.route('/ModelList2/', methods=['GET']) |
| | | def ModelList2(): |
| | | model_path = base.prefix +"\\model_list.json" |
| | | model_list="" |
| | | with open(model_path,encoding='utf-8') as f: |
| | | model_list = json.load(f) |
| | | return jsonify(model_list) |
| | | |
| | | #delete a model |
| | | @app.route('/deleteModel/', methods=['GET']) |
| | | def deleteModel(): |
| | | model_name = request.args.get('model_name') |
| | | if not model_name: |
| | | return jsonify({"code":400,"msg":"Deleting an empty directory is not allowed!"}) |
| | | if model_name == base.not_allowed_model: |
| | | return jsonify({"code":400,"msg":"The validation model cannot be deleted!"}) |
| | | |
| | | paths = base.model_dir + model_name |
| | | shutil.rmtree(paths) |
| | | CalHead.removeModelJson(model_name) |
| | | return jsonify({"code":200,"msg":"模åå é¤å®æ¯ï¼"}) |
| | | |
| | | #颿µåºæ¯åæ° |
| | | @app.route('/prediction', methods=['POST']) |
| | |
| | | predictionJson = base.model_dir + model_name +"\\prediction.json" |
| | | with open(predictionJson, "w",encoding='utf-8') as outfile: |
| | | json.dump(jsondata, outfile,ensure_ascii=False) |
| | | |
| | | return jsonify("ä¿å颿µåºæ¯åæ°å®æ¯ï¼") |
| | | |
| | | #è¿è¡æ¨¡å |
| | | dicts = Predict.run_model(model_name) |
| | | return jsonify(dicts["msg"]) |
| | | |
| | | |
| | | |
| | | #颿µåºæ¯åæ° |
| | | @app.route('/predictionparam', methods=['GET']) |
| | | def predictionparam(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | print(model_name) |
| | | file_list = os.listdir(base.model_dir) |
| | | if model_name not in file_list: |
| | | return jsonify("Model does not exist!") |
| | |
| | | def pump_importdata(): |
| | | |
| | | model_name = request.form.get('model_name') |
| | | |
| | | # ml= getModel(model_name) |
| | | types = request.form.get('type') |
| | | file = request.files.get('file') |
| | | print(types) |
| | | |
| | | save_path = base.model_dir + model_name +"\\extra_cell.xlsx" |
| | | save_path = base.model_dir + model_name +"\\"+types+".xlsx" |
| | | print(save_path) |
| | | |
| | | if file: |
| | | file.save(save_path) |
| | | |
| | | resultDict={"code":200,"msg":"ä¿åæ°æ®å®æ¯ï¼"} |
| | | data= DataTransf.base_excel(model_name,types) |
| | | |
| | | resultDict={"code":200,"msg":"ä¿åæ°æ®å®æ¯ï¼","data":data} |
| | | return jsonify(resultDict) |
| | | |
| | | #observation well list |
| | | @app.route('/obsWellList', methods=['GET']) |
| | | def obsWellList(): |
| | | obswell= base.obs_well |
| | | dicts =[] |
| | | for wellId, name , row ,column in obswell: |
| | | obj ={"wellId":wellId,"name":name,"row":row,"column":column,"Layer":1} |
| | | dicts.append(obj) |
| | | |
| | | return jsonify(dicts) |
| | | |
| | | |
| | | #observation well chart data API |
| | |
| | | model_name = request.args.get('model_name') |
| | | row = request.args.get('row') |
| | | column = request.args.get('column') |
| | | wellId = request.args.get('wellId') |
| | | |
| | | result = CalHead.obsChartdata(wellId, model_name, row, column) |
| | | |
| | | return jsonify(result) |
| | | |
| | |
| | | base_year = request.args.get('base_year') |
| | | start_time = request.args.get('start_time') |
| | | end_time = request.args.get('end_time') |
| | | |
| | | value = float(request.args.get('value')) |
| | | return jsonify(Predict.predict_river_chart(base_year, start_time, end_time, value)) |
| | | |
| | | #颿µé¡µé¢ éæ°´å¾è¡¨ |
| | | @app.route('/predictWaterChart', methods=['GET']) |
| | |
| | | base_year = request.args.get('base_year') |
| | | start_time = request.args.get('start_time') |
| | | end_time = request.args.get('end_time') |
| | | value = float(request.args.get('value')) |
| | | return jsonify(Predict.predict_water_chart(base_year, start_time, end_time, value)) |
| | | |
| | | #line chart of well pumping volume |
| | | @app.route('/predictWellChart', methods=['POST']) |
| | | def predictWellChart(): |
| | | |
| | | json = request.get_json() |
| | | base_year = str(json['base_year']) |
| | | start_time = json['start_time'] |
| | | end_time = json['end_time'] |
| | | data = json['data'] |
| | | |
| | | return jsonify(Predict.predict_well_chart(base_year, start_time, end_time,data)) |
| | | |
| | | #flow field map |
| | | @app.route('/flowField', methods=['GET']) |
| | | def flowField(): |
| | | model_name = request.args.get('model_name') |
| | | flowStartTime = int(request.args.get('flowStartTime')) |
| | | flowEndTime=int(request.args.get('flowEndTime')) |
| | | flowType= request.args.get('flowType')   #one of: 水位 (level), 变幅 (change), 埋深 (depth) |
| | | # layer = int(request.args.get('layer') ) |
| | | |
| | | pic = str(int(time.time())) +".png" |
| | | outpath = base.flow_file + pic |
| | | dicts={} |
| | | if flowType=="水位": |
| | | dicts = achiveReport.flow_field(model_name, flowStartTime, 0,"流场信息", "online", outpath) |
| | | if flowType=="变幅": |
| | | dicts = achiveReport.getFlowFieldBF(model_name,flowStartTime,flowEndTime,outpath) |
| | | if flowType=="埋深": |
| | | dicts = achiveReport.getFlowFieldDepth(model_name,flowStartTime, 0,"流场信息", "online", outpath) |
| | | |
| | | dicts["path"] = "/xishan/xinshanFlow/"+pic |
| | | return jsonify(dicts) |
| | | |
| | | |
| | | #initial water level flow field |
| | | @app.route('/initWaterFlow/', methods=['GET']) |
| | | def initWaterFlow(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | layer = int(request.args.get('layer') ) |
| | | pic = str(int(time.time())) +".png" |
| | | outpath = base.flow_file + pic |
| | | result= achiveReport.init_flow_field(model_name, layer,"初始流场信息", "online", outpath) |
| | | if result == "#": |
| | | return jsonify("") |
| | | return jsonify("/xishan/xinshanFlow/"+pic) |
| | | |
| | | |
| | | #precipitation balance |
| | |
| | | #earth page chart data |
| | | @app.route('/earthWaterChart', methods=['GET']) |
| | | def earthWaterChart(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | indexId = int(request.args.get('index_id')) |
| | | data = CalHead.earthWaterChart("202001_202212",indexId) |
| | | data = CalHead.earthWaterChart(model_name,indexId) |
| | | return jsonify(data) |
| | | |
| | | #hydraulic conductivity |
| | | @app.route('/mdLpf', methods=['GET']) |
| | | def mdLpf(): |
| | | lf = base.md_lpf |
| | | return jsonify(np.array(lf).tolist()) |
| | | |
| | | #water resources |
| | | @app.route('/water_res', methods=['GET']) |
| | | def water_res(): |
| | | model_name = request.args.get('model_name') |
| | | #water balance |
| | | path1=base.muiltyModel + model_name +"\\water_bal.txt" |
| | | bal = CalHead.water_balance(model_name, path1) |
| | | path2=base.muiltyModel + model_name +"\\water_res.txt" |
| | | res = CalHead.water_res(model_name,path2) |
| | | |
| | | dicts = BigData.mergeWaterData(bal,res) |
| | | |
| | | initFlowPNG = achiveReport.getWaterResFiled(model_name,0) |
| | | dicts["initFlowPNG"]= "/xishan/xinshanFlow/" + initFlowPNG |
| | | |
| | | FlowPNG2 = achiveReport.getWaterResFiled(model_name,11) |
| | | dicts["lastFlowPNG"]= "/xishan/xinshanFlow/" + FlowPNG2 |
| | | dicts["initMonth"] ="2023-01" |
| | | dicts["lastMonth"] ="2023-12" |
| | | |
| | | return dicts |
| | | |
| | | |
| | | #夿¨¡åå°ä¸æ°´ä½ |
| | | @app.route('/water_depth', methods=['GET']) |
| | | def water_depth(): |
| | | model_name = request.args.get('model_name') |
| | | |
| | | result = CalHead.water_depth(model_name) |
| | | return result |
| | | |
| | | |
| | | #small scenario: water level and change for a single model |
| | | @app.route('/xs_depth', methods=['GET']) |
| | | def xs_depth(): |
| | | model_name = request.args.get('model_name') |
| | | |
| | | res = CalHead.xs_depth(model_name) |
| | | jsondata= CalHead.get_model_json(model_name) |
| | | start_time = jsondata["start_time"] |
| | | end_time = jsondata["end_time"] |
| | | months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | res["months"] = months |
| | | return res |
| | | |
| | | #rainfall at monitoring stations: list |
| | | @app.route('/sensor_jyl_list', methods=['GET']) |
| | | def sensor_jyl_list(): |
| | | model_name = request.args.get('model_name') |
| | | data = OpenExcel.read_excel(model_name) |
| | | return jsonify(data) |
| | | |
| | | #rainfall at monitoring stations: save |
| | | @app.route('/sensor_jyl_save', methods=['POST']) |
| | | def sensor_jyl_save(): |
| | | json = request.get_json() |
| | | model_name = str(json['model_name']) |
| | | data = json['data'] |
| | | OpenExcel.write_excel(model_name,data) |
| | | return jsonify("ä¿å宿¯") |
| | | |
| | | #monthly report content |
| | | @app.route('/xs_month_report', methods=['GET']) |
| | | def xs_month_report(): |
| | | model_name = request.args.get('model_name') |
| | | per = int(request.args.get('period')) |
| | | res = achiveReport.archive_report_content(model_name,per) |
| | | return res |
| | | |
| | | |
| | | #宿¶æ°æ® |
| | | @app.route('/xs_real_data', methods=['GET']) |
| | | def xs_real_data(): |
| | | num = request.args.get('num') |
| | | start_time = request.args.get('start_time') |
| | | end_time = request.args.get('end_time') |
| | | types = request.args.get('types') |
| | | res = DataTask.get_data(types,num,start_time,end_time) |
| | | return jsonify(res) |
| | | |
| | | |
| | | if __name__ == '__main__': |
| | | #app.run() # you can specify the host IP, the port, and whether to enable debug mode |
| | | app.run(host="192.168.0.122", port=5000) |
| | | |
| | | |
| | | |
| | |
| | | from datetime import datetime |
| | | import calendar |
| | | from dateutil import rrule |
| | | import json |
| | | import Base as base |
| | | import os |
| | | |
| | | |
| | | |
| | | #get the date of the last day of the month for a given year and month |
| | |
| | | last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1) |
| | | return last_day |
| | | |
| | | |
| | | def get_months_in_year(): |
| | | return ["1æ","2æ","3æ","4æ","5æ","6æ","7æ","8æ","9æ","10æ","11æ","12æ",] |
| | | |
| | | def get_months_in_range_ym(start_time, end_time): |
| | | |
New file |
| | |
| | | |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import shutil |
| | | import Base as base |
| | | |
| | | |
| | | def save_excel(model_name): |
| | | dst = base.model_dir + model_name +"\\jyl.xlsx" |
| | | shutil.copyfile(base.prefix +"雨量站输入模板.xlsx",dst) |
| | | |
| | | #read the excel template data |
| | | def read_excel(model_name): |
| | | paths = base.model_dir + model_name +"\\jyl.xlsx" |
| | | data=[] |
| | | if not os.path.exists(paths): |
| | | return data |
| | | wb = load_workbook(filename = paths) |
| | | ws = wb[wb.sheetnames[1]] |
| | | |
| | | for row in ws.iter_rows(): |
| | | tmp =[] |
| | | for cell in row: |
| | | tmp.append(cell.value) |
| | | data.append(tmp) |
| | | wb.close() |
| | | return data |
| | | |
| | | |
| | | #read the rainfall calculation worksheet |
| | | def read_jyl_excel(model_name): |
| | | paths = base.model_dir + model_name +"\\jyl.xlsx" |
| | | data=[] |
| | | if not os.path.exists(paths): |
| | | return data |
| | | wb = load_workbook(filename = paths,data_only=True) |
| | | ws = wb[wb.sheetnames[2]] |
| | | |
| | | for row in ws.iter_rows(): |
| | | tmp =[] |
| | | for cell in row: |
| | | tmp.append(cell.value) |
| | | data.append(tmp) |
| | | wb.close() |
| | | return data |
| | | |
| | | |
| | | def write_excel(model_name,data): |
| | | paths = base.model_dir + model_name +"\\jyl.xlsx" |
| | | if not os.path.exists(paths): |
| | | save_excel(model_name) |
| | | wb = load_workbook(filename = paths) |
| | | ws = wb[wb.sheetnames[1]] |
| | | for i in range(len(data)): |
| | | for j in range(len(data[i])): |
| | | ws.cell(row=i+1, column=j+1).value = data[i][j] |
| | | wb.save(paths) |
| | | wb.close() |
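| | | |
| | | #Hedged usage sketch of this module's round trip: read the template grid, edit |
| | | #it in memory, then write it back to jyl.xlsx. "demo" is a hypothetical model |
| | | #name and the template is assumed to have at least two rows and two columns. |
| | | def _example_roundtrip(model_name="demo"): |
| | |     rows = read_excel(model_name)      #2-D list of cell values |
| | |     if rows: |
| | |         rows[1][1] = 12.5              #overwrite one station value |
| | |         write_excel(model_name, rows)  #persisted back into jyl.xlsx |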
| | | |
| | | |
| | | |
| | |
| | | |
| | | # import the Flask class |
| | | from flask import Flask |
| | | from flask import jsonify |
| | | from flask import request |
| | | from flask_cors import CORS |
| | | import sys |
| | | import numpy as np |
| | | import pandas as pd |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import time |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import shutil |
| | | import json |
| | | import Base as base |
| | | import CalHead |
| | | import ModelPeriod |
| | | import OpenExcel |
| | | |
| | | |
| | | base_init_year=["2020","2021","2022"] |
| | |
| | | #precipitation |
| | | |
| | | base_water = base.prefix + 'base_water.ini' |
| | | |
| | | def predict_water_chart(base_year,start_time ,end_time,value): |
| | | water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8') |
| | | |
| | | y_data=[] |
| | | x_data= ModelPeriod.get_months_in_year() |
| | | water= water_array[int(base_year)] |
| | | for data in water: |
| | | float_data = float(data)/9 |
| | | float_data= round(float_data*value,2) |
| | | y_data.append(float_data) |
| | | |
| | | result = {"y_data": y_data, "x_data": x_data} |
| | | return result |
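| | | |
| | | #Hedged usage sketch (the arguments are hypothetical and base_water must exist |
| | | #on disk): scale base year "1" by 10% and get twelve monthly points back as |
| | | #{"x_data": [...], "y_data": [...]} for the chart. |
| | | def _example_water_chart(): |
| | |     return predict_water_chart("1", "2023-01", "2023-12", 1.1) |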
| | |
| | | #river line chart |
| | | |
| | | base_river = base.prefix + 'base_river.ini' |
| | | |
| | | def predict_river_chart(base_year,start_time ,end_time,value): |
| | | river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8') |
| | | |
| | | y_data=[] |
| | | x_data= ModelPeriod.get_months_in_year() |
| | | for data in river_array: |
| | | #convert to 10^4 cubic meters |
| | | float_data = float(data)/4/10000 |
| | | float_data= round(float_data*value,2) |
| | | y_data.append(float_data) |
| | | |
| | | result = {"y_data": y_data, "x_data": x_data} |
| | | return result |
| | | |
| | | |
| | | #pumping volume line chart |
| | | base_mining = base.prefix + 'base_mining.ini' |
| | | def predict_well_chart(base_year,start_time ,end_time,data): |
| | | # base year 2022; districts: Haidian / Changping / Mentougou / Shijingshan |
| | | area_names=["全部区域","海淀区","昌平区","门头沟区","石景山区"] |
| | | mining_array = np.loadtxt(base_mining, dtype=str,encoding='utf-8') |
| | | x_data= ModelPeriod.get_months_in_year() |
| | | result = {"x_data": x_data} |
| | | y_data=[] |
| | | for item in data: |
| | | name = item["area_name"] |
| | | value = float(item["value"]) |
| | | if name =='朝阳区':   #Chaoyang district is skipped |
| | | continue |
| | | index = area_names.index(name) |
| | | row_data = mining_array[index] |
| | | float_data=[] |
| | | for item in row_data: |
| | | x = round(float(item)/10000*value,2) |
| | | float_data.append(x) |
| | | dicts={"name":name,"data":float_data} |
| | | y_data.append(dicts) |
| | | result["y_data"] = y_data |
| | | return result |
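| | | |
| | | #Hedged sketch of the payload predict_well_chart expects: one entry per |
| | | #district, "value" being a pumping multiplier. The district names must match |
| | | #the Chinese keys in area_names above; the numbers here are hypothetical. |
| | | def _example_well_chart_payload(): |
| | |     return [ |
| | |         {"area_name": "海淀区", "value": 1.0}, |
| | |         {"area_name": "昌平区", "value": 0.8}, |
| | |     ] |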
| | | |
| | | |
| | | def run_model(model_name): |
| | | if model_name == base.not_allowed_model or model_name in base.archive_models: |
| | | return "æ¬æ¨¡å为éªè¯æ¨¡å,ä¸å
许修æ¹ï¼" |
| | | |
| | | dicts= run_model_predict(model_name) |
| | | if dicts["code"] == 400: |
| | | return dicts |
| | | #export the csv file |
| | | CalHead.exportCsV(model_name) |
| | | |
| | | #update the model's 3-D grid configuration |
| | | base.updateModelConfig(model_name) |
| | | |
| | | #create the model's 3-D grid |
| | | filedir = base.model3d_path + model_name |
| | | |
| | | if not os.path.exists(filedir): |
| | | os.makedirs(filedir, exist_ok=True) |
| | | |
| | | base.callModelexe() |
| | | #compute water resources and the water balance |
| | | CalHead.run_zonebudget_bal(model_name) |
| | | CalHead.run_zonebudget_res(model_name) |
| | | dicts= {"code":200,"msg":"颿µæ¨¡åè¿è¡æåï¼" } |
| | | return dicts |
| | | |
| | | |
| | | def run_model_predict(model_name): |
| | | |
| | | |
| | | predictiondata="" |
| | | prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | | if os.path.exists(prediction_path): |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | predictiondata = json.load(f) |
| | | predictiondata = json.load(f) |
| | | |
| | | if predictiondata: |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | updateDisFile(model_name,periods_len) |
| | | try: |
| | | flag = updateBase6File(model_name,predictiondata) |
| | | if bool(flag)==False: |
| | | dicts= {"code":400,"msg":"Please check whether the model's initial head is set!" } |
| | | return dicts |
| | | except: |
| | | dicts= {"code":400,"msg":"Please check whether the model's initial head is set!" } |
| | | return dicts |
| | | |
| | | try: |
| | | updateRchFile(model_name,predictiondata) |
| | | except: |
| | | print("RchFile has no prediction parameters; nothing to modify!") |
| | | |
| | | try: |
| | | updateRiverFile(model_name,predictiondata) |
| | | except: |
| | | print("RiverFile has no prediction parameters; nothing to modify!") |
| | | else: |
| | | print("The prediction.json scenario file is empty; no files need updating") |
| | | |
| | |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | ml.run_model(report = True) |
| | | return "颿µæ¨¡åè¿è¡æåï¼" |
| | | dicts= {"code":200,"msg":"颿µæ¨¡åè¿è¡æåï¼" } |
| | | return dicts |
| | | |
| | | |
| | | |
| | |
| | | |
| | | rain_ratio = float(predictiondata["rain"]["ratio"]) |
| | | rain_base_year = predictiondata["rain"]["base_year"] |
| | | if rain_base_year=='4': |
| | | rain_base_year="1" |
| | | |
| | | river_ratio= float(predictiondata["river"]["ratio"]) |
| | | area= predictiondata["mine"]["area"] |
| | | |
| | | ws="" |
| | | start_time = predictiondata["start_time"] |
| | | end_time = predictiondata["end_time"] |
| | | count = ModelPeriod.get_months_in_range_count(start_time, end_time) |
| | | if count==12: |
| | | ws= base.predictParamModel + rain_base_year |
| | | else: |
| | | ws= base.predictModel60 |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | |
| | | |
| | | lrcq = {} |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | array2d = [] |
| | | |
| | |
| | | array = [Layer, Row, Column, Q] |
| | | array2d.append(array) |
| | | |
| | | #append the extra wells |
| | | flex_data= getFlexdata(model_name) |
| | | print("==============") |
| | | print(flex_data) |
| | | |
| | | for i in range(len(flex_data)): |
| | | array2d.append(flex_data[i]) |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(updateMdoel, |
| | |                          ipakcb= baseMdoel.wel.ipakcb, |
| | |                          dtype=baseMdoel.wel.dtype, |
| | |                          options=baseMdoel.wel.options, |
| | |                          stress_period_data=lrcq) |
| | | updateMdoel.write_input() |
| | | |
| | | else: |
| | |
| | | if flag == "true": |
| | | #wet year / dry year |
| | | base_year = predictiondata["rain"]["base_year"] |
| | | ratio= float(predictiondata["rain"]["ratio"]) |
| | | if base_year =="1" or base_year =="2" or base_year =="3": |
| | | updateRchBaseYear(model_name,predictiondata) |
| | | elif base_year =="4": |
| | | #ingest sensor data |
| | | updateRchRealData(model_name,predictiondata) |
| | | |
| | | else: |
| | | print("Rchæä»¶æ éä¿®æ¹ï¼") |
| | | |
| | | #update data for a normal, wet, or dry base year |
| | | def updateRchBaseYear(model_name,predictiondata): |
| | | #wet year / dry year |
| | | base_year = predictiondata["rain"]["base_year"] |
| | | ratio= float(predictiondata["rain"]["ratio"]) |
| | | #model folder the data comes from |
| | | base_ws="" |
| | | start_time = predictiondata["start_time"] |
| | | end_time = predictiondata["end_time"] |
| | | count = ModelPeriod.get_months_in_range_count(start_time, end_time) |
| | | if count==12: |
| | | base_ws= base.predictParamModel + base_year |
| | | else: |
| | | base_ws= base.predictModel60 |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | for per in range(periods_len): |
| | | item = baseMdoel.rch.rech.__getitem__(kper = per) |
| | | array2d = item.get_value() |
| | | array2d_len = len(array2d) |
| | | |
| | | for i in range(array2d_len): |
| | | |
| | | array_len = len(array2d[i]) |
| | | for j in range(array_len): |
| | | if str(base.area_array[i][j]) != '-9999': |
| | | array2d[i][j] = array2d[i][j] * ratio |
| | | |
| | | updateMdoel.rch.rech.__setitem__(key = per, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(updateMdoel, |
| | |                                nrchop=baseMdoel.rch.nrchop, |
| | |                                ipakcb=baseMdoel.rch.ipakcb, |
| | |                                rech=updateMdoel.rch.rech, |
| | |                                irch =baseMdoel.rch.irch) |
| | | |
| | | rch.write_file(check=False) |
| | | |
| | | |
| | | #ingest sensor data |
| | | def updateRchRealData(model_name,predictiondata): |
| | | |
| | | # use the normal-year rch file as the base |
| | | base_year = "1" |
| | | ratio= float(predictiondata["rain"]["ratio"]) |
| | | if not ratio: |
| | | ratio = 1 |
| | | excel_data = OpenExcel.read_jyl_excel(model_name) |
| | | array_data =[] |
| | | |
| | | if not excel_data: |
| | | print("Rainfall template data not found!") |
| | | return "Rainfall template data not found!" |
| | | for i in range(1,len(excel_data)): |
| | | temp =[] |
| | | for j in range(1,len(excel_data[i])): |
| | | data = round(float(excel_data[i][j]),8) |
| | | temp.append(data) |
| | | array_data.append(temp) |
| | | |
| | | base_ws="" |
| | | start_time = predictiondata["start_time"] |
| | | end_time = predictiondata["end_time"] |
| | | count = ModelPeriod.get_months_in_range_count(start_time, end_time) |
| | | if count==12: |
| | | base_ws= base.predictParamModel + base_year |
| | | else: |
| | | base_ws= base.predictModel60 |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | update_model_ws = base.model_dir + model_name |
| | | |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | #rainfall zones |
| | | area_dicts = base.getAreas() |
| | | |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | for per in range(periods_len): |
| | | #data for the 16 zones in this period; indices 0-15 match the keys of the area_dicts rainfall-zone dict |
| | | water_per_data = get_Array2_column(array_data,per) |
| | | |
| | | item = baseMdoel.rch.rech.__getitem__(kper = per) |
| | | array2d = item.get_value() |
| | | for key in area_dicts: |
| | | tuples= area_dicts[key] |
| | | values = water_per_data[int(key)] |
| | | for i in range(len(tuples)): |
| | | x = tuples[i][0] |
| | | y = tuples[i][1] |
| | | array2d[x][y]= values*ratio |
| | | |
| | | updateMdoel.rch.rech.__setitem__(key = per, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(updateMdoel, |
| | | nrchop=baseMdoel.rch.nrchop, |
| | | ipakcb=baseMdoel.rch.ipakcb, |
| | | rech=updateMdoel.rch.rech, |
| | | irch =baseMdoel.rch.irch) |
| | | |
| | | rch.write_file(check=False) |
| | | print("éæ°´ååºæ°æ®æ´æ°å®æ¯!") |
| | | return "éæ°´ååºæ°æ®æ´æ°å®æ¯ï¼" |
| | | |
| | | #get one column of the data |
| | | def get_Array2_column(array_data,column): |
| | | arr = np.array(array_data) |
| | | column_data = arr[:, column] |
| | | return column_data |
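| | | |
| | | #Minimal check of get_Array2_column with hypothetical data: plain numpy column |
| | | #slicing, used above to pull one period's values for all rainfall zones. |
| | | def _example_column(): |
| | |     return get_Array2_column([[1, 2], [3, 4], [5, 6]], 1)  #-> array([2, 4, 6]) |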
| | | |
| | | |
| | | def check_rain_param(predictiondata): |
| | | |
| | |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | |
| | | #initial head |
| | | if "initHeader" not in predictdata: |
| | | print("=============has no initHeader=============") |
| | | return False |
| | | init_header = predictdata["initHeader"] |
| | | |
| | | dir = base.model_dir + init_header + "\\modflow.head" |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | |
| | | last_index = len(alldata)-1 |
| | | last_array3= alldata[last_index] |
| | | |
| | | strt = ml.bas6.strt |
| | |
| | | extension="bas6",) |
| | | |
| | | mfBase6.write_file(check=False) |
| | | return True |
| | | |
| | | |
| | | #modify the dis file |
| | |
| | | extension="dis") |
| | | |
| | | mfDis.write_file(check=False) |
| | | |
| | | |
New file |
| | |
| | | |
| | | import flopy.utils.binaryfile as bf |
| | | import Base as base |
| | | import numpy as np |
| | | |
| | | #specific yield |
| | | water_u = 0.2 |
| | | #study area in square meters |
| | | water_F= float(5652 * 500 *500) |
| | | #number of active computation cells |
| | | water_invalid_cell =5652 |
| | | #study area in square kilometers |
| | | water_F_KM= 680.250 |
| | | |
| | | #number of plain-area (pyq) cells |
| | | pyq_cells_total =2721 |
| | | |
| | | |
| | | #groundwater storage change |
| | | def get_grd_storage(model_name,per1,per2): |
| | | arr = np.loadtxt(base.xs_mp_path, dtype=int) |
| | | pyq_cells=[] |
| | | k=0 |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | if arr[i][j] == 1: |
| | | k+=1 |
| | | pyq_cells.append((i,j)) |
| | | |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | |
| | | if model_name=="202001_202212": |
| | | dir = base.baseModel2 + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | |
| | | #head data at the start period |
| | | z_start = alldata[int(per1+1)*3-3,0,:,:] |
| | | #head data at the end period |
| | | z_end = alldata[int(per2+1)*3-1,0,:,:] |
| | | |
| | | z_start[(z_start<=0)] = 0 |
| | | z_end[(z_end<=0)] = 0 |
| | | |
| | | # z_start_avg = float(np.sum(z_start)/5652) |
| | | # z_end_avg = float(np.sum(z_end)/5652) |
| | | |
| | | z_start_total = 0 |
| | | z_end_total = 0 |
| | | for item in pyq_cells: |
| | | i = item[0] |
| | | j = item[1] |
| | | z_start_total += z_start[i,j] |
| | | z_end_total += z_end[i,j] |
| | | |
| | | z_start_avg = z_start_total/pyq_cells_total |
| | | z_end_avg = z_end_total/pyq_cells_total |
| | | |
| | | #ΔW = 100 * (h1 - h2) * μ * F / t |
| | | year = (per2+1-per1)/12 |
| | | storage = 100 * (z_start_avg-z_end_avg) * water_u * water_F_KM /year |
| | | return storage |
| | | |
| | | |
| | | |
| | | #relative balance error: |
| | | #X = (Q_total_recharge - Q_total_discharge) ± ΔW |
| | | #relative error = X / Q_total_recharge * 100% |
| | | def get_grd_relative_equ(Q1, Q2,w): |
| | | x= ( Q1-Q2) + w |
| | | y = x/Q1 |
| | | return y |
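| | | |
| | | #Worked example with hypothetical numbers: recharge Q1=10, discharge Q2=9 and |
| | | #storage change w=-0.5 give a residual X=(Q1-Q2)+w=0.5, so the relative |
| | | #balance error is X/Q1 = 5%. |
| | | def _example_balance(): |
| | |     return get_grd_relative_equ(10.0, 9.0, -0.5)  #-> 0.05 |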
| | | |
| | | |
| | | |
New file |
| | |
| | | |
| | | # import the Flask class |
| | | from flask import Flask |
| | | from flask import jsonify |
| | | from flask import request |
| | | from flask_cors import CORS |
| | | import pymssql |
| | | |
| | | |
| | | # Flask takes __name__, which points to the package the app belongs to |
| | | app = Flask(__name__) |
| | | CORS(app, supports_credentials=True, resources=r'/*') |
| | | |
| | | server = '192.168.0.123:1433' |
| | | user='sa' |
| | | password='admin123X' |
| | | database ='microseism3' |
| | | |
| | | #get data for a given month |
| | | def get_event_location_data(month): |
| | | |
| | | conn = pymssql.connect(server=server, user=user, password=password, database=database,as_dict=True) |
| | | cursor = conn.cursor() |
| | | res =[] |
| | | try: |
| | | #the month is cast to int so the table name cannot be injected |
| | | sqlStr = 'SELECT * FROM dbo.event_location_'+ str(int(month)) |
| | | # execute the query |
| | | cursor.execute(sqlStr) |
| | | |
| | | # fetch the result set |
| | | result = cursor.fetchall() |
| | | |
| | | for row in result: |
| | | dic={"x":row["Event_X"],"y":row["Event_Y"],"z":row["Event_Z"],"v":row["Event_Energy"]} |
| | | res.append(dic) |
| | | |
| | | except Exception as e: |
| | | print("Error occurred:", str(e)) |
| | | return [] |
| | | |
| | | finally: |
| | | # close the connection |
| | | cursor.close() |
| | | conn.close() |
| | | return res |
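| | | |
| | | #Hedged sketch of one record returned above: keys mirror the Event_X/Y/Z and |
| | | #Event_Energy columns of the event_location_<month> table; values hypothetical. |
| | | def _example_event_record(): |
| | |     return {"x": 512.3, "y": 208.7, "z": -430.0, "v": 1800.0} |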
| | | |
| | | #query data for the month passed in |
| | | @app.route('/get_event_location_data', methods=['GET']) |
| | | def event_location_data(): |
| | | month = request.args.get('month') |
| | | res = get_event_location_data(month) |
| | | return jsonify(res) |
| | | |
| | | |
| | | if __name__ == '__main__': |
| | | #app.run() # you can specify the host IP, the port, and whether to enable debug mode |
| | | app.run(host="192.168.0.107", port=8080) |
| | | |
| | | |
| | | |