From 1ae38ab34bcbdff622c7623119ee54bad419f4ed Mon Sep 17 00:00:00 2001
From: zmk <496160012@qq.com>
Date: Wed, 28 Aug 2024 16:11:08 +0800
Subject: [PATCH] Commit code

---
 Base.py             |   45 
 OpenExcel.py        |   61 
 AchiveReport.py     |  494 ++++++
 GRU_zmk.py          |  275 +++
 GRU_zmk_pre.py      |  267 +++
 CalHead.py          |  908 ++++++++++--
 DataTransf.py       |  526 +++++++
 /dev/null           |   22 
 MainAPI.py          |  620 ++++----
 Predict.py          |  303 +++
 DataTask.py         |  325 ++++
 GRU_zmk_pre - 副本.py |  215 ++
 WaterXBL.py         |   77 +
 main.py             |   60 
 BigData.py          |   39 
 ModelPeriod.py      |    7 
 Base-dev.py         |  173 ++
 17 files changed, 3852 insertions(+), 565 deletions(-)

diff --git a/AchiveReport.py b/AchiveReport.py
new file mode 100644
index 0000000..4abf631
--- /dev/null
+++ b/AchiveReport.py
@@ -0,0 +1,494 @@
+import matplotlib.pyplot as plt
+from matplotlib import cm
+import numpy as np
+import math
+from docxtpl import DocxTemplate
+import flopy
+import flopy.utils.binaryfile as bf
+from matplotlib import cm, colors
+import os
+import time 
+import Base as base
+import CalHead
+import WaterXBL
+
+# Get the head (Z) array for one stress period and layer
+def get_flow_Z(model_name,per,layer):  
+    dir = base.model_dir + model_name  + "\\modflow.head" 
+    mlist = CalHead.muiltyModelList()
+    if model_name==base.not_allowed_model:
+         dir = base.baseModel2  + "\\modflow.head" 
+    if  model_name in mlist :
+         dir = base.muiltyModel + model_name  + "\\modflow.head" 
+    
+    head = bf.HeadFile(dir)
+    alldata = head.get_alldata()  
+    # get the data for the stress period (last saved timestep)
+    z = alldata[int(per+1)*3-1,int(layer),:,:] 
+    z[(z<=0)] = 0 
+    return z
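+
+# Sketch of the indexing used above (assumes 3 saved timesteps per stress
+# period, as in this project's modflow.head files): stress period `per` maps
+# to flat timestep (per+1)*3-1, i.e. the last step of that month.
+def _demo_last_step_index(per):
+    # per=0 -> 2 (last step of month 1); per=11 -> 35 (last step of month 12)
+    return (int(per) + 1) * 3 - 1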
+
+# Read the initial water levels from the bas6 file
+def get_init_flowZ(model_name,layer):
+     model_ws = base.model_dir + model_name
+     ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+     
+     strs = ml.bas6.strt.__getitem__(layer)
+     z = strs.get_value()
+     z[(z<=0)] = 0 
+     
+     # mask the invalid boundary cells of the verification model
+     if model_name==base.not_allowed_model:
+         arr = np.loadtxt(base.areapath, dtype=int)
+         for i in range(len(arr)):
+             for j in range(len(arr[i])):           
+                 if arr[i][j] == -9999:
+                     z[i][j]=0
+     return z
+     
+# Get the contour-level array for the flow-field map
+def get_flow_levels(data):
+    maxdata= np.max(data)
+    levels=[]
+    levels.append(0.1)
+    lines = 10 
+    while lines < maxdata:
+        levels.append(lines)
+        lines = lines + 10
+    levels.append(math.ceil(maxdata))
+    return levels
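+
+# Worked example: for np.max(data) == 37.2 the function returns
+# [0.1, 10, 20, 30, 38] (0.1 floor, steps of 10, ceil of the maximum).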
+
+
+# Get contour levels for maps with a small head-change range
+def get_flow_levels_small(min, max, line_count):
+
+    step = round((max-min)/line_count,0) 
+    if step <= 0:
+        step = 0.5
+    levels=[]
+    levels.append(min)
+    lines = min + step
+    while lines < max:
+        levels.append(lines)
+        lines = lines + step
+    levels.append(max)
+    return levels
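+
+# Worked example: get_flow_levels_small(0.0, 5.0, 10) rounds the step to 0,
+# falls back to 0.5, and returns [0.0, 0.5, 1.0, ..., 5.0].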
+
+# Get the head-change (Z) array between two stress periods
+def get_bf_z(model_name,per1,per2):
+    
+    dir = base.model_dir + model_name  + "\\modflow.head" 
+    mlist = CalHead.muiltyModelList()
+    if model_name==base.not_allowed_model:
+         dir = base.baseModel2  + "\\modflow.head" 
+    if  model_name in mlist :
+         dir = base.muiltyModel + model_name  + "\\modflow.head" 
+    
+    head = bf.HeadFile(dir)
+    alldata = head.get_alldata()  
+  
+    # head change between the two periods
+    z1= alldata[int(per1+1)*3-1,0,:,:]  
+    z2 = alldata[int(per2+1)*3-1,0,:,:]  
+    res = np.subtract(z2,z1)   
+    return res  
+    
+
+# Draw the head-change flow-field map
+def getFlowFieldBF(model_name,per1,per2,outpath):
+    res = get_bf_z(model_name,per1,per2)
+    max = np.max(res)
+    min = np.min(res)
+    # set invalid cells to 9999
+    arr = np.loadtxt(base.areapath, dtype=int)
+    for i in range(len(arr)):
+       for j in range(len(arr[i])):  
+           if arr[i][j] == -9999:
+                res[i][j]= 9999.0
+    levels= get_flow_levels_small(min,max,10)
+    draw_flow(res,levels,outpath,cm.RdBu_r)
+    
+    str_levs=[]
+    for item in levels:
+        str_levs.append(str(round(item,2)))
+    
+    cols = get_gradient_color_list("RdBu_r",len(levels))
+    dicts ={"levels":str_levs,"colors":cols}
+    xbl = WaterXBL.get_grd_storage(model_name,int(per1),int(per2))
+    dicts["xbl"]=xbl
+    return dicts  
+
+# Get the depth-to-water (burial depth) map
+def getFlowFieldDepth(model_name,per,layer,title,flow_types,out_path):
+    Z= get_flow_Z(model_name,per,layer)
+    dis = np.loadtxt(base.dis_top_path, dtype=str)
+    areaMatrix = np.loadtxt(base.areapath, dtype=int)
+    for i in range(len(areaMatrix)):
+       for j in range(len(areaMatrix[i])):  
+           if areaMatrix[i][j] == -9999:
+                Z[i][j] =0.0
+           else :   
+                Z[i][j] =float(dis[i][j])-Z[i][j]
+
+    levels=[10,30,40,60,100,200,300,400,500,700,900]
+    draw_flow(Z,levels,out_path,cm.RdBu_r)
+    cols = get_gradient_color_list("RdBu_r",len(levels))
+    dicts ={"levels":levels,"colors":cols}
+    return dicts
+      
+     
+def getWaterResFiled(model_name,per):
+    pic = str(int(time.time())) +".png"
+    outpath = base.flow_file + pic  
+    flow_field(model_name,per,0,"flow","online",outpath)
+    return pic
+    
+# Flow-field map
+def flow_field(model_name,per,layer,title,flow_types,out_path):  
+    Z= get_flow_Z(model_name,per,layer)
+    levels = get_flow_levels(Z)
+    draw_flow(Z,levels,out_path,cm.RdBu)
+    cols = get_gradient_color_list("RdBu",len(levels))
+    dicts ={"levels":levels,"colors":cols}
+    return dicts
+
+
+#鍒濆娴佸満鍥�
+def init_flow_field(model_name,layer,title,flow_types,out_path):
+    # for the verification model, read the bas6 file directly as the initial heads
+    if model_name == base.not_allowed_model:
+        Z= get_init_flowZ(model_name,layer)
+        levels = get_flow_levels(Z)
+        draw_flow(Z,levels,out_path,cm.RdBu)
+    else:
+        pjson= CalHead.get_model_json(model_name)
+        if "initHeader" in pjson:
+             initHead = pjson["initHeader"]
+             print(initHead)
+             Z= get_flow_Z(initHead,0,0)
+             levels = get_flow_levels(Z)
+             draw_flow(Z,levels,out_path,cm.RdBu)
+        else:
+            return "#"
+    print(out_path)
+    return out_path
+
+
+# Draw the flow field
+def draw_flow(Z,levels,out_path,colorbar):
+
+    # start drawing the flow-field map
+    x = np.arange(0, 114, 1)
+    y = np.arange(0, 104, 1) 
+    # build the mesh grid
+    X, Y = np.meshgrid(x, y)
+    # flip the y-axis so the origin is the top-left corner
+    plt.gca().invert_yaxis()   
+    # contour lines
+    C = plt.contour(X, Y, Z, levels=levels, linewidths=0.5, colors='white')  
+    # contour label style
+    plt.clabel(C, inline=2,fmt='%.2f', fontsize=4,colors='black')  
+    # fill between contour lines, e.g. cmap="RdBu_r" / cm.RdBu_r
+    plt.contourf(X, Y, Z,levels= levels,alpha = 0.75,cmap=colorbar)
+   
+    plt.axis("off")
+    plt.colorbar().ax.set_visible(False)
+    plt.xticks([])  
+    plt.yticks([]) 
+ 
+    plt.savefig(out_path, dpi=300,transparent=True,  bbox_inches='tight')
+    plt.close('all')
+    return out_path
+
+
+def get_gradient_color_list(m_color_name, m_num):
+    m_color_list = []
+    m_color_map = plt.get_cmap(m_color_name, m_num)
+    for m_i in range(m_num):
+        m_color = tuple([int(_ * 256) for _ in list(m_color_map(m_i)[:-1])])
+        m_color_list.append(m_color)
+    return m_color_list
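+
+# Usage sketch: sample three evenly spaced RGB tuples from a named colormap;
+# values are scaled to the 0-256 range as in the loop above.
+# get_gradient_color_list("RdBu", 3)  # -> e.g. [(103, 0, 31), ...]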
+
+# Export the report from the docx template
+def exportReport(model_name,period):
+    tpl = DocxTemplate(base.prefix + '地下水发布信息模板.docx')
+    cont = archive_report_content(model_name,period)
+    tpl.render(cont)
+    save_path=""
+    tpl.save(save_path)
+    
+
+# Build the report content
+def archive_report_content(model_name,period):  
+    data1 = archive_grd_depth(model_name,period)
+    data2= archive_grd_res(model_name,period)
+    
+    #鍒濆娴佸満
+    initpic = str(int(time.time()))  +"1.png"
+    outpath = base.flow_file + initpic    
+    flow_field(model_name,0,0,"初始流场信息", "online",outpath)
+
+    currentpic =  str(int(time.time()))  +"2.png"
+    outpath2 = base.flow_file + currentpic 
+    flow_field(model_name,int(period),0,"流场信息", "online",outpath2)
+    
+    content={
+    "py_avg_water":str(data1[0]),
+    "py_m_water":str(data1[1]),
+    "py_y_water":str(data1[2]),
+    
+    "sq_avg_water":str(data1[3]),
+    "sq_m_water":str(data1[4]),
+    "sq_y_water":str(data1[5]),
+    
+    "yq_avg_water":str(data1[6]),
+    "yq_m_water":str(data1[7]),
+    "yq_y_water":str(data1[8]),
+    
+    "w_m_res":str(data2[0]),
+    "w_y_res":str(data2[1]),
+    "flow1":"/xishan/xinshanFlow/"+initpic,
+    "flow2":"/xishan/xinshanFlow/"+currentpic
+    }
+    if data1[1]>=0:
+        content["py_m_water"]= "回升"+str(abs(data1[1]))
+    else:
+        content["py_m_water"]= "下降"+str(abs(data1[1]))
+         
+    if data1[2]>=0:
+        content["py_y_water"]= "回升"+str(abs(data1[2]))
+    else:
+        content["py_y_water"]= "下降"+str(abs(data1[2]))
+    
+    if data1[4]>=0:
+        content["sq_m_water"]= "回升"+str(abs(data1[4]))
+    else:
+        content["sq_m_water"]= "下降"+str(abs(data1[4]))
+    
+    if data1[5]>=0:
+        content["sq_y_water"]= "回升"+str(abs(data1[5]))
+    else:
+        content["sq_y_water"]= "下降"+str(abs(data1[5]))
+    
+    if data1[7]>=0:
+        content["yq_m_water"]= "回升"+str(abs(data1[7]))
+    else:
+        content["yq_m_water"]= "下降"+str(abs(data1[7]))
+    
+    if data1[8]>=0:
+        content["yq_y_water"]= "回升"+str(abs(data1[8]))
+    else:
+        content["yq_y_water"]= "下降"+str(abs(data1[8]))
+    
+    if data2[1]>=0:
+        content["w_y_res"]= "增加"+str(abs(data2[1]))
+    else:
+        content["w_y_res"]= "减少"+str(abs(data2[1]))
+    return content
+    
+# Water resource amount
+def archive_grd_res(model_name,period):
+    txt_path = base.model_dir + model_name + "\\water_res.txt"
+    if not os.path.exists(txt_path):
+          CalHead.run_zonebudget_res(model_name)
+    
+    monthdata = CalHead.water_res_month(model_name,txt_path,int(period))
+    monthdata2=[]
+    if int(period) > 0:
+        monthdata2 = CalHead.water_res_month(model_name,txt_path,int(period-1))
+    else:
+        monthdata2 = monthdata
+    
+    water1 = monthdata[0] + monthdata[1]+ monthdata[2]- monthdata[3]
+    water2 =  monthdata2[0] + monthdata2[1] + monthdata2[2] - monthdata2[3]
+    water1 = round(water1 ,4)
+    wat= round(water1-water2 ,4)
+   
+    return [water1,wat]
+    
+# Compute the groundwater-depth content for the monthly report
+def archive_grd_depth(model_name,period): 
+    model_dir = base.model_dir + model_name + "\\modflow.head"
+    head = bf.HeadFile(model_dir)
+    alldata = head.get_alldata()
+
+    # get the monthly head data
+    per = int(period)
+    current_month_data=[]
+    pre_month_data=[]
+    pre_year_data=[]
+    
+    if per > 0:
+        current_month_data = alldata[int(per+1)*3-1,0,:,:] 
+        pre_month_data= alldata[int(per)*3-1,0,:,:] 
+    else :
+        current_month_data = alldata[int(per+1)*3-1,0,:,:] 
+        pre_month_data=  current_month_data
+        
+    mpdict = getMPDict()
+    YQSDict = getYQSDict()
+    pingyuanArray = mpdict["1"]
+    shanquArray = mpdict["2"]
+    yqArray = YQSDict["1"]
+    
+    # surface elevation
+    gc_array= np.array(getTopDis()) 
+    depth_array = np.subtract(gc_array,current_month_data)
+     
+    py_data,py_data2 = 0,0
+    sq_data, sq_data2 = 0,0
+    yqs_data , yqs_data2 = 0,0 
+    # depth to water
+    py_depth,sq_depth,yqs_depth =0,0,0
+    k,m,n =0,0,0
+    for item in pingyuanArray:
+        i,j = item[0],item[1]      
+        if current_month_data[i,j]>0:
+            py_depth += depth_array[i,j]
+            py_data += current_month_data[i,j]  
+            k+=1
+        if pre_month_data[i,j]>0:
+            py_data2 += pre_month_data[i,j]
+       
+    for item in shanquArray:
+        i,j = item[0],item[1]
+        if current_month_data[i,j]>0:
+            sq_depth += depth_array[i,j]
+            sq_data += current_month_data[i,j]   
+            m+=1
+        if pre_month_data[i,j]>0:
+            sq_data2 += pre_month_data[i,j]
+     
+    for item in yqArray:
+        i,j = item[0],item[1]
+        if current_month_data[i,j]>0:
+            yqs_depth += depth_array[i,j]
+            yqs_data += current_month_data[i,j] 
+            n+=1
+        if pre_month_data[i,j]>0:
+            yqs_data2 += pre_month_data[i,j]
+        
+    py_data =   py_data/k
+    sq_data =   sq_data/m
+    yqs_data=  yqs_data/n
+    
+    py_data2 = py_data2/k
+    sq_data2 =  sq_data2/m
+    yqs_data2=  yqs_data2/n
+    
+    py_depth = py_depth/k
+    sq_depth = sq_depth/m
+    yqs_depth = yqs_depth/n
+
+    
+    result=[py_depth,py_data-py_data2,0,
+            sq_depth,sq_data-sq_data2,0,
+            yqs_depth,yqs_data-yqs_data2,0]
+    
+    for i in range(len(result)):
+        result[i]= round(result[i],2)
+    
+    return result
+
+
+
+# Get line-chart data for the mountain, plain, and Yuquanshan zones of the study area
+def getXs3LineChart(paths):
+    
+    head = bf.HeadFile(paths)
+    alldata = head.get_alldata()
+    months = int(len(alldata)/3)
+     
+    mpdict = getMPDict()
+    YQSDict = getYQSDict()
+    
+    pingyuanArray = mpdict["1"]
+    shanquArray = mpdict["2"]
+    yqArray = YQSDict["1"]
+    
+    result1=[] 
+    result2=[]
+    result3=[]
+    for per in range(months):
+         current_month = alldata[int(per+1)*3-1,0,:,:] 
+         
+         yqs_data, py_data, sq_data = 0.0, 0.0, 0.0
+         M ,N,k = 0,0,0
+         
+         for item in pingyuanArray:
+             i,j = item[0],item[1]
+             if current_month[i,j]>0:
+                 py_data += current_month[i,j]
+                 M+=1
+         py_data= round(py_data/M,2)
+         result1.append(py_data)
+         
+         for item in shanquArray:
+             i,j = item[0],item[1]
+             if current_month[i,j]>0:
+                 sq_data += current_month[i,j]
+                 N+=1
+         sq_data=  round(sq_data/N,2 )
+         result2.append(sq_data)
+         
+         for item in yqArray:
+             i,j = item[0],item[1]
+             if current_month[i,j]>0:
+                 yqs_data += current_month[i,j]
+                 k+=1
+         yqs_data=  round(yqs_data/k,2 )
+         result3.append(yqs_data)
+           
+    
+    dicts={"pyq":result1,"sq":result2,"yqs":result3}
+    return dicts
+    
+    
+
+# Dict of mountain/plain zone cells
+# plain area = 1, mountain area = 2
+def getMPDict():
+    arr = np.loadtxt(base.xs_mp_path, dtype=int)
+    dict ={}
+    for i in range(len(arr)):
+        for j in range(len(arr[i])):  
+            zb = str(arr[i][j])
+            if arr[i][j] == -9999:         
+                continue        
+            if zb not in dict:
+                dict[zb] = [(i,j)]         
+            else:
+                dict[zb].append((i,j))  
+    return dict
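+
+# Shape sketch of the returned dict: zone value (as a string) -> list of
+# (row, col) cells, e.g. {"1": [(0, 5), ...], "2": [(3, 7), ...]} where
+# "1" collects plain-area cells and "2" mountain-area cells.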
+
+# Dict of Yuquanshan zone cells
+# Yuquanshan = 1
+def getYQSDict():
+    arr = np.loadtxt(base.yqs_path, dtype=int)
+    dict ={}
+    for i in range(len(arr)):
+        for j in range(len(arr[i])):  
+            zb = str(arr[i][j])
+            if arr[i][j] != 1:         
+                continue        
+            if zb not in dict:
+                dict[zb] = [(i,j)]         
+            else:
+                dict[zb].append((i,j))  
+    return dict    
+   
+# Get the surface elevation matrix
+def getTopDis():
+     arr = np.loadtxt(base.dis_top_path, dtype=str)
+     float_array = np.array(arr).astype("float")
+     return float_array
+   
+    
+   
+    
+   
+    
+   
+
+
diff --git a/Base-dev.py b/Base-dev.py
new file mode 100644
index 0000000..4ba30e9
--- /dev/null
+++ b/Base-dev.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Oct 20 16:15:23 2023
+
+@author: ZMK
+"""
+
+import numpy as np
+import shutil
+import os
+
+
+# Output directory for generated flow-field images
+flow_file ="D:\\javaCode\\xishan\\xishan\\xishan\\xinshanFlow\\"
+
+prefix ='C:\\Users\\86134\\Desktop\\xsModel2\\'
+
+ZoneBudget64Exe= prefix + "zonebuget\\ZoneBudget64.exe"
+
+water_bal_zones = prefix +"zonebuget\\water_bal.zones\n"
+water_res_zones = prefix +"zonebuget\\water_res.zones\n"
+
+water_lsh_path = prefix + "water_lsh.ini"
+water_yhy_path = prefix + "water_yhy.ini"
+water_dbw_path = prefix + "water_dbw.ini"
+
+
+baseModel = prefix + 'verifyModel\\'
+baseModel2 = prefix + 'verifyModel2\\'
+
+predictModel= prefix + 'predictModel\\'
+
+predictParamModel= prefix + 'predictParamModel\\'
+
+muiltyModel =  prefix + 'muiltyModel\\'
+
+model_dir = prefix + '0612Model\\'
+
+obswellpath = prefix + '监测井.ini'
+obswell_data_path=  prefix + 'water_obs_data.ini'
+
+obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8')
+
+district_path = prefix +"区县.ini"
+
+district= np.loadtxt(district_path, dtype=str,encoding='utf-8')
+
+pumpwellpath = prefix +'抽水井.ini'
+
+pump_well = np.loadtxt(pumpwellpath, dtype=str,encoding='utf-8')
+
+period_path = prefix  +"period.json"
+
+areapath = prefix + '分区.ini'
+area_array = np.loadtxt(areapath, dtype=str,encoding='utf-8')
+
+# Water-balance file paths
+water_equ_path = prefix + 'water_equ.ini'
+water_equ = np.loadtxt(water_equ_path, dtype=str,encoding='utf-8')
+
+water_equ_path2022 = prefix + 'water_equ2022.ini'
+water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8')
+
+# Surface elevation data
+dis_top_path = prefix + 'md_dis_top.ini'
+
+# Storage coefficients by zone
+lpf_path =  prefix + 'md_lpf.ini'
+md_lpf =  np.loadtxt(lpf_path, dtype=str,encoding='utf-8')
+
+# Yuquanshan matrix data
+yqs_path=  prefix + '玉泉山泉划分.ini'
+xs_yqs_matrix =  np.loadtxt(yqs_path, dtype=str,encoding='utf-8')
+
+# Mountain/plain zone matrix
+xs_mp_path = prefix + '山区平原区划分.ini'
+xs_mp_matrix =  np.loadtxt(xs_mp_path, dtype=str,encoding='utf-8')
+
+           
+model_config ='D:\\javaCode\\xishan\\objclipdig\\ModelFlow_xishan\\config.ini'
+
+model3d_path='D:/javaCode/xishan/xishan/xishan/output2/'
+
+modeldata_csv_path ="C:/Users/86134/Desktop/xsModel2/0612Model/"
+
+exe_path = 'D:/javaCode/xishan/objclipdig/ModelFlow_xishan/ModelFlow_xishan.exe'
+
+
+# Invoke the exe program
+def callModelexe():
+    os.system(exe_path)
+
+
+# Update the model's exe configuration
+def updateModelConfig(model_name):
+    conf = np.loadtxt(model_config, dtype=str,encoding='utf-8')
+    outpath = "outpath=" + model3d_path + model_name
+    csvpath = "csvpath=" + modeldata_csv_path + model_name +"/output"
+    conf[1]=outpath
+    conf[2]=csvpath
+    np.savetxt(model_config,conf, newline='\n', fmt='%s' , encoding='utf-8')
+
+
+
+def getPumpWellName(row,column):
+  
+    for index, r, c,ids, qu ,name in pump_well:
+        if r==row and c == column:
+            return name
+    
+    return "NONE"
+
+
+# Get a dict grouping matrix cells by zone value
+def getAreas():
+    arr = np.loadtxt(areapath, dtype=int)
+    dict ={}
+    for i in range(len(arr)):
+        for j in range(len(arr[i])):  
+            zb = str(arr[i][j])
+            if arr[i][j] == -9999:         
+                continue        
+            if zb not in dict:
+                dict[zb] = [(i,j)]         
+            else:
+                dict[zb].append((i,j))  
+    return dict
+
+
+def getAreaDictFirstIndex():
+    arr = np.loadtxt(areapath, dtype=int)
+    dict ={}
+    for i in range(len(arr)):
+        for j in range(len(arr[i])):           
+            if arr[i][j] == -9999:         
+                continue        
+            if arr[i][j] not in dict:
+                dict[arr[i][j]] = [(i,j)]         
+             
+    return dict
+    
+
+# Get a dict of flattened cell indexes per zone
+def getAreaDictIndexArray():
+    arr = np.loadtxt(areapath, dtype=int)
+    dict_array={}
+    for i in range(len(arr)):
+        for j in range(len(arr[i])):  
+            zb= str(arr[i][j])
+            if arr[i][j] == -9999:         
+                continue        
+            if zb not in dict_array:
+                array= []
+                index = getCellIdByRC(i+1,j+1)
+                array.append(index)    
+                dict_array[zb] = array                              
+            else:            
+                index = getCellIdByRC(i+1,j+1)
+                dict_array[zb].append(index)
+            
+    return dict_array
+
+
+def getCellIdByRC(rowVal, columnVal):   
+    return (rowVal - 1) * 114 + columnVal - 1
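+
+# Sketch (hypothetical helper, unused elsewhere): the inverse mapping on the
+# 104 x 114 grid, so _demo_rc_by_cell_id(getCellIdByRC(r, c)) == (r, c).
+def _demo_rc_by_cell_id(cell_id):
+    row = cell_id // 114 + 1
+    column = cell_id % 114 + 1
+    return (row, column)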
+
+
+
+    
+    
+    
+    
+    
diff --git a/Base.py b/Base.py
index f4542fb..ce9871d 100644
--- a/Base.py
+++ b/Base.py
@@ -6,21 +6,47 @@
 """
 
 import numpy as np
-import shutil
 import os
 
 
+
+# Default name of the result model
+not_allowed_model="202001_202212"
+
+archive_models=["SP0-0","SP1-1","SP1-2","SP1-3","SP2-1","SP2-2","SP2-3","SP3-1",
+            "SP3-2","SP3-4","SP3-5","SP3-6","SP3-7","SP4-1","SP4-7"]
+
+# Output directory for generated flow-field images
+flow_file ="D:\\javaCode\\xishan\\xishan\\xishan\\xinshanFlow\\"
+
 prefix ='C:\\Users\\ZMK\\Desktop\\xsModel2\\'
 
+ZoneBudget64Exe= prefix + "zonebuget\\ZoneBudget64.exe"
+
+water_bal_zones = prefix +"zonebuget\\water_bal.zones\n"
+water_res_zones = prefix +"zonebuget\\water_res.zones\n"
+
+water_lsh_path = prefix + "water_lsh.ini"
+water_yhy_path = prefix + "water_yhy.ini"
+water_dbw_path = prefix + "water_dbw.ini"
+
+
 baseModel = prefix + 'verifyModel\\'
+baseModel2 = prefix + 'verifyModel2\\'
 
 predictModel= prefix + 'predictModel\\'
+predictModel60 = prefix + 'predictModel60\\'
 
 predictParamModel= prefix + 'predictParamModel\\'
+
+muiltyModel =  prefix + 'muiltyModel\\'
 
 model_dir = prefix + '0612Model\\'
 
 obswellpath = prefix + '监测井.ini'
+obswell_data_path=  prefix + 'water_obs_data.ini'
+
+well_scale_path =  prefix + 'well_scale.ini'
 
 obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8')
 
@@ -44,7 +70,22 @@
 water_equ_path2022 = prefix + 'water_equ2022.ini'
 water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8')
 
-             
+# Surface elevation data
+dis_top_path = prefix + 'md_dis_top.ini'
+
+# Storage coefficients by zone
+lpf_path =  prefix + 'md_lpf.ini'
+md_lpf =  np.loadtxt(lpf_path, dtype=str,encoding='utf-8')
+
+# Yuquanshan matrix data
+yqs_path=  prefix + '玉泉山泉划分.ini'
+xs_yqs_matrix =  np.loadtxt(yqs_path, dtype=str,encoding='utf-8')
+
+# Mountain/plain zone matrix
+xs_mp_path = prefix + '山区平原区划分.ini'
+xs_mp_matrix =  np.loadtxt(xs_mp_path, dtype=str,encoding='utf-8')
+
+           
 model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini'
 
 model3d_path='D:/javaCode/xishan/xishan/xishan/output2/'
diff --git a/BigData.py b/BigData.py
new file mode 100644
index 0000000..540147e
--- /dev/null
+++ b/BigData.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Oct 31 16:12:55 2023
+
+@author: ZMK
+"""
+
+
+def mergeWaterData(balArray,resArray):
+    dicts ={}
+   
+    inarray=[]
+    inarray.append({"name":"降水入渗量","value":balArray[0]})
+    inarray.append({"name":"河流入渗量","value":balArray[1]})
+    inarray.append({"name":"L1侧向补给量","value":balArray[2]})
+    inarray.append({"name":"L3侧向补给量","value":balArray[3]})
+    outarray=[]
+    outarray.append({"name":"人工开采量","value":balArray[4]})
+    outarray.append({"name":"L1侧向流出量","value":balArray[5]})
+    outarray.append({"name":"L3侧向流出量","value":balArray[6]})
+
+    inarray2=[]
+    inarray2.append({"name":"大气降水","value":resArray[0]})
+    inarray2.append({"name":"永定河渗漏","value":resArray[1]})
+    inarray2.append({"name":"侧向流入","value":resArray[2]})
+    outarray2=[]
+    outarray2.append({"name":"侧向流出","value":resArray[3]})
+
+    dicts["pie1"]=inarray
+    dicts["pie2"]=outarray
+    dicts["pie3"]=inarray2
+    dicts["pie4"]=outarray2
+    
+    return dicts
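+
+# Usage sketch (made-up numbers): balArray carries the 7 water-balance terms
+# and resArray the 4 water-resource terms computed in CalHead.
+# pies = mergeWaterData([1.2, 0.3, 0.4, 0.1, 0.9, 0.2, 0.1],
+#                       [1.5, 0.6, 0.3, 0.2])
+# pies["pie1"]  # inflow entries of the balance pie chart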
+   
+    
\ No newline at end of file
diff --git a/CalHead.py b/CalHead.py
index 55ecad8..162383f 100644
--- a/CalHead.py
+++ b/CalHead.py
@@ -5,78 +5,129 @@
 @author: ZMK
 """
 
-import flopy
 import flopy.utils.binaryfile as bf
 import csv
 import Base as base
 import os
 import json
+import subprocess
+import re
 import ModelPeriod
 import numpy as np
+import AchiveReport
 
-
+# Add a model entry to model_list.json
+def addModelJson(model_name,start_time,end_time,remark):
+  
+      context=""
+      prediction_path = base.prefix + "\\model_list.json"
+      with open(prediction_path,encoding='utf-8') as f:
+             context = json.load(f)   
+      array=[]
+      for item in context:
+             array.append(item) 
+             
+      dicts={"model_name":model_name,
+              "start_time":start_time,"end_time":end_time,"remark":remark} 
+      array.append(dicts)
+      
+      with open(prediction_path, "w",encoding='utf-8') as outfile:
+          json.dump(array, outfile,ensure_ascii=False)
+      return "保存完毕！"
+    
+# Remove a model entry from model_list.json
+def removeModelJson(model_name):
+      context=""
+      prediction_path = base.prefix + "\\model_list.json"
+      with open(prediction_path,encoding='utf-8') as f:
+             context = json.load(f)   
+      array=[]
+      for item in context:
+          if item["model_name"] != model_name:
+              array.append(item) 
+              
+      with open(prediction_path, "w",encoding='utf-8') as outfile:
+          json.dump(array, outfile,ensure_ascii=False)
+          
+      return "删除模型完毕！"
+    
 
 def get_model_json(model_name):
       period_json=""
       prediction_path = base.model_dir + model_name +"\\prediction.json"
       with open(prediction_path,encoding='utf-8') as f:
-             period_json = json.load(f)  
-     
+             period_json = json.load(f)    
       return period_json; 
   
 def get_model_period(model_name):
-     period_json=""
-     prediction_path = base.model_dir + model_name +"\\prediction.json"
-     with open(prediction_path,encoding='utf-8') as f:
-             period_json = json.load(f)  
-     
+     period_json= get_model_json(model_name)     
      start_time = period_json["start_time"]
-     end_time = period_json["end_time"]
-      
+     end_time = period_json["end_time"]    
      months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
      return months; 
     
+def is_cloned_model(model_name):
+    paths = base.model_dir + model_name + "\\water_bal.txt"
+    if os.path.exists(paths):
+        return False
+    return True
+    
 
 # Observation-well chart
-def obsChartdata(model_name, row, column):
+def obsChartdata(wellId,model_name, row, column):
     
       row = int(row)-1
       column = int(column)-1
-      dir = base.model_dir + model_name + "\\modflow.head"
-
-      head = bf.HeadFile(dir)
-      alldata = head.get_alldata()
-      period = len(alldata)
-
-      layer = 3
-
-      xdata = []
-      ydata = []
-      result = {}
-      for per in range(period):
-           for lay in range(layer):
-               if per % 3 == 0 and lay == 0:
-                   md = (int)(lay / 3 + 1)
-                   per_array = alldata[per][lay]
-
-                   cell_data = (float)(per_array[row][column])
-                   ydata.append(cell_data)
-    
-      period_json= get_model_json(model_name)
-      
+      dir=""    
+      period_json= get_model_json(model_name)    
       start_time = period_json["start_time"]
       end_time = period_json["end_time"]
+      xmonths = ModelPeriod.get_months_in_range_ym(start_time, end_time)
       
-      months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
-
-      result = {"y_data": ydata, "x_data": months}
+      if model_name == base.not_allowed_model:
+          dir = base.baseModel2  + "\\modflow.head"
+      else:
+          dir = base.model_dir + model_name + "\\modflow.head"
+          cloned = is_cloned_model(model_name)
+          if cloned ==True:
+              return  {"y_data": [],"y_data2":[],"x_data":xmonths }
+          
+      head = bf.HeadFile(dir)
+      alldata = head.get_alldata()
+      
+      # number of stress periods (months)
+      months = int(len(alldata)/3)
+      ydata= []
+      result = {}
+      for month in range(months):
+          z1= alldata[int(month+1)*3-1,0,:,:]
+          cell_data = float(z1[row][column])
+          ydata.append(round(cell_data,2))
+    
+      y_data2=[]
+      if model_name == base.not_allowed_model:
+          array_data =  np.loadtxt(base.obswell_data_path, dtype=str,encoding='utf-8')
+          y_data2= getObsData(wellId,array_data)
+          
+      result = {"y_data": ydata,"y_data2":y_data2,"x_data": xmonths}
       return result
 
-def getRowCloumnById(index_id):
-    row = 104
-    column =114
-    count=0
+def getObsData(wellId,array_data):
+    result =[]
+    new_list =[]
+    for item in array_data:
+        if item[0]==wellId:
+            result.append(item[3])
+    for i in range(0,len(result),3):
+        data =( float(result[i]) +float(result[i+1])+float(result[i+2]))/3
+        data = round(data,2)
+        new_list.append(data)
+        
+    return new_list
     
+    
+def getRowCloumnById(index_id):
+    row,column,count = 104,114,0
     for  i in range(row):
         for j in range(column):
             if index_id == count:
@@ -89,116 +140,39 @@
 # Groundwater information
 def earthWaterChart(model_name, index_id):
     
-      row_column =  getRowCloumnById(index_id)
-      
+      row_column =  getRowCloumnById(index_id)  
       row = row_column[0]
       column = row_column[1]
-      dir = base.model_dir + model_name + "\\modflow.head"
-
-      head = bf.HeadFile(dir)
-      alldata = head.get_alldata()
-      period = len(alldata)
-
-      layer = 3
-
-      ydata = []
-      result = {}
-      for per in range(period):
-           for lay in range(layer):
-               if per % 3 == 0 and lay == 0:
-                  
-                   per_array = alldata[per][lay]
-
-                   cell_data = (float)(per_array[row][column])
-                   ydata.append(cell_data)
-    
-      period_json= get_model_json(model_name)
       
+      period_json= get_model_json(model_name)    
       start_time = period_json["start_time"]
       end_time = period_json["end_time"]
+      xmonths = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+      dir = ""
+      if model_name == base.not_allowed_model:
+          dir = base.baseModel2  + "\\modflow.head"
+      else:
+          dir = base.model_dir + model_name + "\\modflow.head"
+          cloned = is_cloned_model(model_name)
+          if cloned ==True:
+              return  {"y_data": [],"x_data":xmonths }
       
-      months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+      head = bf.HeadFile(dir)
+      alldata = head.get_alldata()
+      
+      # number of stress periods (months)
+      months = int(len(alldata)/3)
+      ydata= []
+      result = {}
+      for month in range(months):
+          z1= alldata[int(month+1)*3-1,0,:,:]
+          cell_data = float(z1[row][column])
+          ydata.append(round(cell_data,2))
 
-      result = {"y_data": ydata, "x_data": months}
+      result = {"y_data": ydata, "x_data": xmonths}
       return result
-
-def heatmapdata(model_name,period):
-    dir = base.model_dir + model_name + "\\modflow.head"
-      
-    head = bf.HeadFile(dir)
-
-    alldata = head.get_alldata()
+   
     
-    index = int(period)*3
-    return alldata[index][0]
-
-
-# Water-balance calculation
-def waterEqu(model_name):
-    if model_name == '202001_202212':  
-        water_equ_path = base.prefix + "\\water_equ.json"
-        with open(water_equ_path,encoding='utf-8') as f:
-             data = json.load(f)
-             return data       
-    else:
-        year = model_name[0:4]
-        title =[year]
-        dict ={"title":title}
-        
-        celldata = np.array(base.water_equ2022).tolist()
-      
-        predict_json= get_model_json(model_name)
-        
-        a1=float(celldata[0])
-        a2=float(celldata[1])
-        a3=float(celldata[2])
-        a4=float(celldata[3])
-        
-        b1=float(celldata[4])
-        b2=float(celldata[5])
-        b3=float(celldata[6])
-        
-        if predict_json["rain"]:
-            a1= float(predict_json["rain"]["ratio"]) * float(celldata[0])  
-            a3= float(predict_json["rain"]["ratio"]) * float(celldata[2]) 
-            a4= float(predict_json["rain"]["ratio"]) * float(celldata[3])        
-            b2= float(predict_json["rain"]["ratio"]) * float(celldata[5]) 
-            b3= float(predict_json["rain"]["ratio"]) * float(celldata[6])     
-        if predict_json["river"]:
-            a2=  float(predict_json["river"]["ratio"]) * float(celldata[1])
-            
-        if predict_json["mine"]:
-            b1=  b1    
-        
-        in_data= a1+a2+a3+a4
-        out_data= b1 +b2 + b3
-        float_data=[a1,a2,a3,a4,in_data,b1,b2,b3,out_data,in_data-out_data]
-        
-        inarray=[]
-        inarray.append({"name":"降水入渗量","value":a1})
-        inarray.append({"name":"河流入渗量","value":a2})
-        inarray.append({"name":"L1侧向补给量","value":a3})
-        inarray.append({"name":"L3侧向补给量","value":a4})
-        outarray=[]
-        outarray.append({"name":"人工开采量","value":b1})
-        outarray.append({"name":"L1侧向流出量","value":b2})
-        outarray.append({"name":"L3侧向流出量","value":b3})
-        pie1={str(year):inarray}
-        pie2={str(year):outarray}
-        
-        dict["pie1"]=pie1
-        dict["pie2"]=pie2
-        
-        array2d=[]
-        array2d.append([str(year)])
-        for i in range(len(float_data)):
-            tmp=[]
-            tmp.append(str(float_data[i]))
-            array2d.append(tmp)
-        dict["data"]=array2d
-        return dict
-        
-
 # Export CSV files
 def exportCsV(model_name):
     
@@ -207,26 +181,652 @@
     if not os.path.exists(out_path):
          os.mkdir(out_path)  
     
-    head = bf.HeadFile(dir)
-
+    head = bf.HeadFile(dir) 
     alldata = head.get_alldata()
-    month = len(alldata)
-    layer = 3
-
-    for i in range(month):
-       for j in range(layer):
-          if i % 3 == 0:
-            md = (int)(i / 3 + 1)
-            filename = out_path + str(md) + '-' + str(j+1) + '.csv'
+    months = int(len(alldata)/3)  
+    layers = 3
+    # e.g. months 0-36
+    for month in range(months):
+        for layer in range (layers):
+            z_last= alldata[(month+1)*3-1,layer,:,:]
+            
+            filename = out_path + str(month+1) + '-' + str(layer+1) + '.csv'
             f = open(filename, 'w', newline='')
             writer = csv.writer(f)
-            for p in alldata[i][j]:
+           
+            for p in z_last:
                writer.writerow(p)
             f.close()
-
+            
     return out_path
  
+    
+# Water-balance calculation
+def waterEqu(model_name):
+    if model_name == base.not_allowed_model:  
+        water_equ_path = base.prefix + "\\water_equ.json"
+        with open(water_equ_path,encoding='utf-8') as f:
+             data = json.load(f)
+             return data       
+    else:
+        year = model_name
+        title =[year]
+        dict ={"title":title}
+        
+        jx = get_model_json(model_name)
+        dict["start_time"]=jx["start_time"]
+        dict["end_time"]=jx["end_time"]
+        
+        paths=base.model_dir + model_name +"\\water_bal.txt"
+        wat = water_balance(model_name, paths)
+        
+        in_data= round(wat[0]+ wat[1]+ wat[2]+ wat[3] , 4)
+        out_data= round(wat[4] + wat[5] +  wat[6], 4)
+        inout = round(in_data-out_data, 4)
+        float_data=[wat[0],wat[1],wat[2],wat[3],in_data,
+                     wat[4],wat[5], wat[6],out_data,inout]
+        
+        inarray=[]
+        inarray.append({"name":"降水入渗量","value":wat[0]})
+        inarray.append({"name":"河流入渗量","value":wat[1]})
+        inarray.append({"name":"一层侧向补给量","value":wat[2]})
+        inarray.append({"name":"三层侧向补给量","value":wat[3]})
+        outarray=[]
+        outarray.append({"name":"人工开采量","value":wat[4]})
+        outarray.append({"name":"一层侧向流出量","value":wat[5]})
+        outarray.append({"name":"三层侧向流出量","value":wat[6]})
+        pie1={str(year):inarray}
+        pie2={str(year):outarray}
+        
+        dict["pie1"]=pie1
+        dict["pie2"]=pie2
+        
+        array2d=[]
+        array2d.append(["数据（亿立方米）"])
+        for i in range(len(float_data)):
+            tmp=[]
+            tmp.append(str(float_data[i]))
+            array2d.append(tmp)
+        dict["data"]=array2d
+        return dict        
 
 
 
+def run_zonebudget_bal(model_name):
+    # define the exe path and its stdin inputs
+    exe_path = base.ZoneBudget64Exe
+    txt_path = base.model_dir + model_name + "\\water_bal.txt\n"
+    cell_path = base.model_dir + model_name + "\\modflow.flow\n"
+    process = subprocess.Popen([exe_path], stdin=subprocess.PIPE,shell = True) 
+    process.stdin.write(txt_path.encode())  # input 1: output txt path
+    process.stdin.write(cell_path.encode()) 
+    process.stdin.write(b"title\n") 
+    process.stdin.write(base.water_bal_zones.encode()) 
+    process.stdin.write(b"A\n")
+    output, _ = process.communicate()
+    print(output)
+
+
+def run_zonebudget_res(model_name):
+    # define the exe path and its stdin inputs
+    exe_path = base.ZoneBudget64Exe
+    txt_path = base.model_dir + model_name + "\\water_res.txt\n"
+    cell_path = base.model_dir + model_name + "\\modflow.flow\n"
+    process = subprocess.Popen([exe_path], stdin=subprocess.PIPE,shell = True) 
+    process.stdin.write(txt_path.encode())  # input 1: output txt path
+    process.stdin.write(cell_path.encode()) 
+    process.stdin.write(b"title\n") 
+    process.stdin.write(base.water_res_zones.encode()) 
+    process.stdin.write(b"A\n")
+    output, _ = process.communicate()
+    print(output)
+
+def reg_find_int(text):
+    numbers = re.findall(r'\d+', text)
+    return numbers
+
+
+def read_txt(path):
+    data =[]
+    with open(path, 'r') as file:
+        lines = file.readlines()   
+        for line in lines:
+           data.append(line)
+    return data  
    
+# Parse the water-balance output
+def water_balance(model_name,paths):
+    data= read_txt(paths)  
+    lens = len(data) 
+    index = 0 
+    segment=[]
+    dicts={}
+    flag = 0
+    title=""
+    while index < lens:
+        strs = data[index].strip()
+        if strs.startswith("Flow Budget for Zone"):
+            segment=[]
+            flag = 1
+            title=strs
+        if strs.startswith("Percent Discrepancy"):
+            segment.append(strs) 
+            numbers = reg_find_int(title)
+            key = ','.join(numbers)
+            dicts[key]=segment
+            flag = 0       
+        if flag ==1 :
+            segment.append(strs)
+        index=index+1
+        
+    recharge = 0  
+    for key in dicts:
+        array = dicts[key]
+        temp=[]
+        for item in array:
+            if item.startswith("RECHARGE") :
+                strs = item.replace(" ", "").replace("RECHARGE=", "")
+                temp.append(float(strs))
+                
+        recharge += (temp[0]-temp[1])
+    
+    # River infiltration (positive): sum the WELLS terms of Zone 3 over all stress periods
+    zone3 = 0
+    for key in dicts:
+        if key.startswith("3,"):
+            array = dicts[key]
+            temp=[]
+            for item in array:
+                 if item.startswith("WELLS") :
+                     strs = item.replace(" ", "").replace("WELLS=", "")
+                     temp.append(float(strs))
+       
+            zone3 += (temp[0]-temp[1])
+             
+    # L1 lateral recharge (positive): sum the IN-WELLS terms of Zone 4 over all stress periods
+    Zone4_in_well=0
+    for key in dicts:
+        if key.startswith("4,"):
+            array = dicts[key]
+            for item in array:
+                 if item.startswith("WELLS") :
+                     strs = item.replace(" ", "").replace("WELLS=", "")
+                     data = float(strs)
+                     Zone4_in_well +=data
+                     break
+    
+    # L3 lateral recharge (positive): sum the IN-WELLS terms of Zone 8 over all stress periods
+    Zone8_in_well =0 
+    for key in dicts:
+        if key.startswith("8,"):
+            array = dicts[key]
+            for item in array:
+                 if item.startswith("WELLS") :
+                     strs = item.replace(" ", "").replace("WELLS=", "")
+                     data = float(strs)
+                     Zone8_in_well +=data
+                     break   
+    
+    # Artificial extraction (negative): sum the OUT-WELLS terms of Zone 5 over all stress periods
+    zone5out =0
+    for key in dicts:
+        if key.startswith("5,"):
+            array = dicts[key]
+            for item in array:
+                 if item.startswith("WELLS") :
+                     strs = item.replace(" ", "").replace("WELLS=", "")
+                     data = float(strs)
+                     zone5out +=data 
+     
+    # L1 lateral outflow (negative): sum the OUT-WELLS terms of Zone 4 over all stress periods
+    Zone4_out_well=0  
+    for key in dicts:
+        if key.startswith("4,"):
+            array = dicts[key]
+            for item in array:
+                 if item.startswith("WELLS") :
+                     strs = item.replace(" ", "").replace("WELLS=", "")
+                     data = float(strs)
+                     Zone4_out_well +=data
+     
+    # L3 = L3 lateral outflow (negative); set to 0 here
+    L3=0.0
+    result =[recharge,zone3,Zone4_in_well,Zone8_in_well,zone5out,Zone4_out_well,L3]
+    for i in range(len(result)):
+        # each row actually covers one step; 1 step = 10 days
+        result[i]= round(result[i]/100000000*10, 4)
+    
+    return result
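+
+# Sketch of the unit conversion applied above: ZoneBudget totals are m3 per
+# step and one step covers 10 days, so /100000000*10 yields 1e8 m3 per period.
+def _demo_to_1e8_m3(step_total):
+    # e.g. 25000000.0 m3/step -> 2.5
+    return round(step_total / 100000000 * 10, 4)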
+
+
+# Parse the water-resource output
+def water_res(model_name,paths):
+    data= read_txt(paths) 
+    lens = len(data)
+    index = 0
+    segment=[]
+    dicts={}
+    flag = 0
+    title=""
+    while index < lens:
+        strs = data[index].strip()
+        if strs.startswith("Flow Budget for Zone"):
+            segment=[]
+            flag = 1
+            title=strs
+        if strs.startswith("Percent Discrepancy"):
+            segment.append(strs) 
+            numbers = reg_find_int(title)
+            key = ','.join(numbers)
+            dicts[key]=segment
+            flag = 0       
+        if flag ==1 :
+            segment.append(strs)
+        index=index+1
+        
+    # Atmospheric precipitation: sum all RECHARGE terms of Zone 1 (both in and out)
+    zone1_rechage = 0
+    
+    for key in dicts:
+       if key.startswith("1,"):
+          array = dicts[key]
+          temp=[]
+          for item in array:
+              if item.startswith("RECHARGE") :
+                  strs = item.replace(" ", "").replace("RECHARGE=", "")
+                  temp.append(float(strs))
+                
+          zone1_rechage += (temp[0]-temp[1])
+    
+    # Yongding River leakage: sum all Zone 2 exchange terms of Zone 1 (both in and out)
+    zone1_well = 0
+    for key in dicts:
+        if key.startswith("1,"):
+            array = dicts[key]
+            indata,outdata= 0,0
+            for item in array:
+                 if item.startswith("Zone   2 to   1") :
+                     strs = item.replace(" ", "").replace("Zone2to1=", "")
+                     indata = float(strs)
+                     
+                 if item.startswith("Zone   1 to   2") :
+                     strs = item.replace(" ", "").replace("Zone1to2=", "")
+                     outdata = float(strs)
+       
+            zone1_well += (indata-outdata)
+           
+    # Lateral inflow: sum all Zone 8 exchange terms of Zone 7 (both in and out)
+    zone7=0
+    for key in dicts:
+        if key.startswith("7,"):
+            array = dicts[key]
+            indata,outdata= 0,0
+            for item in array:
+                 if item.startswith("Zone   8 to   7") :
+                     strs = item.replace(" ", "").replace("Zone8to7=", "")
+                     indata = float(strs)
+                   
+                 if item.startswith("Zone   7 to   8") :
+                     strs = item.replace(" ", "").replace("Zone7to8=", "")
+                     outdata = float(strs)
+                    
+            zone7 += (indata-outdata)
+    
+    # Upward leakage discharge: sum all Zone 7 exchange terms of Zone 6 (both in and out)
+    zone6 =0 
+    for key in dicts:
+        if key.startswith("6,"):
+            array = dicts[key]
+            indata,outdata= 0,0
+            for item in array:
+                 if item.startswith("Zone   7 to   6") :
+                     strs = item.replace(" ", "").replace("Zone7to6=", "")
+                     indata = float(strs)
+                     
+                 if item.startswith("Zone   6 to   7") :
+                     strs = item.replace(" ", "").replace("Zone6to7=", "")
+                     outdata = float(strs)
+            zone6 += (indata-outdata)
+    
+    result =[zone1_rechage,zone1_well,zone7,zone6]
+    for i in range(len(result)):
+        result[i]= round(result[i]/100000000*10,4)
+    return result
+
+
+# Parse the water-resource output for a single stress period (per)
+def water_res_month(model_name,paths,per):
+    data= read_txt(paths) 
+    lens = len(data)
+    index = 0
+    segment=[]
+    dicts={}
+    flag = 0
+    title=""
+    while index < lens:
+        strs = data[index].strip()
+        if strs.startswith("Flow Budget for Zone"):
+            segment=[]
+            flag = 1
+            title=strs
+        if strs.startswith("Percent Discrepancy"):
+            segment.append(strs) 
+            numbers = reg_find_int(title)
+            key = ','.join(numbers)
+            dicts[key]=segment
+            flag = 0       
+        if flag ==1 :
+            segment.append(strs)
+        index=index+1
+        
+    # Atmospheric precipitation: sum all RECHARGE terms of Zone 1 (both in and out)
+    zone1_rechage = 0
+    zone1_keys=[ "1,1,"+str(per+1),"1,2,"+str(per+1),"1,3,"+str(per+1)]
+    for key in zone1_keys:
+          array = dicts[key]
+          temp=[]
+          for item in array:
+              if item.startswith("RECHARGE") :
+                  strs = item.replace(" ", "").replace("RECHARGE=", "")
+                  temp.append(float(strs))   
+          zone1_rechage += (temp[0]-temp[1])
+          
+    # Yongding River leakage: sum all Zone 2 exchange terms of Zone 1 (both in and out)
+    zone1_well = 0
+    zone1_well_keys=["1,1,"+str(per+1),"1,2,"+str(per+1),"1,3,"+str(per+1)]
+    for key in zone1_well_keys:
+            array = dicts[key]
+            indata,outdata= 0,0
+            for item in array:
+                 if item.startswith("Zone   2 to   1") :
+                     strs = item.replace(" ", "").replace("Zone2to1=", "")
+                     indata = float(strs)
+                     
+                 if item.startswith("Zone   1 to   2") :
+                     strs = item.replace(" ", "").replace("Zone1to2=", "")
+                     outdata = float(strs)
+       
+            zone1_well += (indata-outdata)
+           
+    # Lateral inflow: sum all Zone 8 exchange terms of Zone 7 (both in and out)
+    zone7=0
+    zone7_keys=["7,1,"+str(per+1),"7,2,"+str(per+1),"7,3,"+str(per+1)]
+    for key in zone7_keys:
+            array = dicts[key]
+            indata,outdata= 0,0
+            for item in array:
+                 if item.startswith("Zone   8 to   7") :
+                     strs = item.replace(" ", "").replace("Zone8to7=", "")
+                     indata = float(strs)
+                     
+                 if item.startswith("Zone   7 to   8") :
+                     strs = item.replace(" ", "").replace("Zone7to8=", "")
+                     outdata = float(strs)
+            zone7 += (indata-outdata)
+  
+    # Upward leakage discharge: sum all Zone 7 exchange terms of Zone 6 (both in and out)
+    zone6 =0 
+    zone6_keys=["6,1,"+str(per+1),"6,2,"+str(per+1),"6,3,"+str(per+1)]
+    for key in zone6_keys:
+            array = dicts[key]
+            indata,outdata= 0,0
+            for item in array:
+                 if item.startswith("Zone   7 to   6") :
+                     strs = item.replace(" ", "").replace("Zone7to6=", "")
+                     indata = float(strs)
+                     
+                 if item.startswith("Zone   6 to   7") :
+                     strs = item.replace(" ", "").replace("Zone6to7=", "")
+                     outdata = float(strs)
+            zone6 += (indata-outdata)
+ 
+    result =[zone1_rechage,zone1_well,zone7,zone6]
+    for i in range(len(result)):
+        result[i]= round(result[i]/100000000*10, 4)
+    
+    return result  
+
+# Water levels across multiple scenario models
+def water_depth(model_name):
+    name_array = model_name.split(",")
+    
+    yhyMatrix = np.loadtxt(base.water_yhy_path, dtype=str,encoding='utf-8')
+    lshMatrix  = np.loadtxt(base.water_lsh_path, dtype=str,encoding='utf-8')
+    dbwMatrix =  np.loadtxt(base.water_dbw_path, dtype=str,encoding='utf-8')
+    
+    res ={} 
+    # surface-elevation reference lines and multi-model series for Yiheyuan, Lianshihu, and Dongbeiwang
+    yhydata=[]
+    base1={"name":"地表高程","data":[52]*12}
+    yhydata.append(base1)
+    
+    lshdata=[]
+    base2={"name":"地表高程","data":[80]*12}
+    lshdata.append(base2)
+    
+    dbwdata=[]
+    base3={"name":"地表高程","data":[49]*12}
+    dbwdata.append(base3)
+    
+    months = ModelPeriod.get_months_in_range_ym("2023-01","2023-12")
+    
+    for i in range(len(name_array)):
+        if name_array[i] != '':
+ 
+             index = muiltyModelIndex(name_array[i])
+             array1 = get_column(yhyMatrix,index)
+             array2 = get_column(lshMatrix,index)
+             array3 = get_column(dbwMatrix,index)
+                
+             yhydata.append({"name":name_array[i],"data":convertColumnData(array1)})
+             lshdata.append({"name":name_array[i],"data":convertColumnData(array2)})
+             dbwdata.append({"name":name_array[i],"data":convertColumnData(array3)})
+            
+    rchMatrix = np.loadtxt(base.prefix + "base_water.ini", dtype=str,encoding='utf-8')
+    riverMatrix = np.loadtxt(base.prefix + "base_river.ini", dtype=str,encoding='utf-8')
+    pumpMatrix = np.loadtxt(base.prefix + "base_mining.ini", dtype=str,encoding='utf-8')
+    
+    rchdata=[]
+    rch_base1 = rchMatrix[1]
+    rch_base1_float =[]
+    for i in range (0,len(rch_base1)):
+         float_data =  round(float(rch_base1[i])/9,2)
+         rch_base1_float.append(float_data)
+    
+    rchdata.append({"name":"基准值","data":rch_base1_float})
+    
+    riverdata=[]
+    riverdata.append({"name":"基准值","data":riverMatrix.astype(float).tolist()})
+    
+    pumpdata=[]
+    pumpX=pumpMatrix[1]
+    pump_float=[]
+    for i in range (0,len(pumpX)):
+         float_data =  round(float(pumpX[i]),2)
+         pump_float.append(float_data)
+    
+    pumpdata.append({"name":"基准值","data":pump_float})
+       
+    res["xAxis"]  = months
+    res["yhy_line"]  = yhydata
+    res["lsh_line"]  = lshdata
+    res["dbw_line"]  = dbwdata
+    
+    
+    for i in range(len(name_array)):
+        if name_array[i] != '':
+            rchdata.append(rchBaseResult(rchMatrix,name_array[i]))
+            riverdata.append(riverBaseResult(riverMatrix, name_array[i]))
+            pumpdata.append(pumpBaseResult(pumpMatrix, name_array[i]))
+            
+    res["rch_line"]  = rchdata
+    res["river_line"]  = riverdata
+    res["pump_line"]  = pumpdata     
+    
+    yqsdata=[]
+    pyqdata=[]
+    sqdata=[]
+    for i in range(len(name_array)):
+         if name_array[i] != '':
+             paths = base.muiltyModel + name_array[i] + "\\modflow.head"
+             resdata = AchiveReport.getXs3LineChart(paths)    
+             pyqdata.append({"name":name_array[i],"data":resdata["pyq"]})
+             sqdata.append({"name":name_array[i],"data":resdata["sq"]})
+             yqsdata.append({"name":name_array[i],"data":resdata["yqs"]})
+             
+    res["yqs_line"]  = yqsdata  
+    res["sq_line"]  = sqdata 
+    res["pyq_line"]  = pyqdata            
+
+    return res 
+
+# Small-scene water levels and head changes
+def xs_depth(model_name):
+    res={}
+    line1,line2=[],[]
+    paths = base.model_dir + model_name + "\\modflow.head"
+    if model_name == base.not_allowed_model:
+        paths = base.baseModel2 + "\\modflow.head"
+    
+    resdata = AchiveReport.getXs3LineChart(paths)    
+    line1.append({"name":"平原区","data":roundArray(resdata["pyq"])})
+    line1.append({"name":"山区","data":roundArray(resdata["sq"])})
+    line1.append({"name":"玉泉山地区","data":roundArray(resdata["yqs"])})
+    res["depth"]  = line1  
+    
+    line2.append({"name":"平原区","data":xs_bf(resdata["pyq"])})
+    line2.append({"name":"山区","data":xs_bf(resdata["sq"])})
+    line2.append({"name":"玉泉山地区","data":xs_bf(resdata["yqs"])})
+    res["bf"]  = line2          
+    return res 
+
+def xs_bf(array):
+    newlist=[]
+    newlist.append(0)
+    lens = len(array)-1
+    for i in range(0,lens):
+        x = array[i+1]-array[i]
+        newlist.append(round(x,2))
+    return newlist
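+
+# Worked example: xs_bf([10.0, 10.5, 10.2]) -> [0, 0.5, -0.3]; the first
+# month is 0 and each later entry is the change from the previous month.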
+
+def roundArray(array):
+    newlist=[]
+    for item in array:
+        item = round(item,2)
+        newlist.append(item)
+    return newlist
+
+# RCH (recharge) baseline data
+def rchBaseResult(rchMatrix,sp):
+    rchDict ={}   
+    rch_base1 = rchMatrix[1]
+    rch_base2 = rchMatrix[2]
+    rch_base3 = rchMatrix[3]    
+    trump = getmuiltyModelparam(sp)
+    
+    types = trump[0]
+    rch_x = trump[1]
+    
+    if types ==1:
+        temp = muiltyArray(rch_base1,rch_x)
+        for i in range(0,len(temp)):
+            temp[i] =round(temp[i]/9,2)    
+        rchDict={"name":sp,"data":temp}
+    if types ==2:
+        temp = muiltyArray(rch_base2,rch_x)
+        for i in range(0,len(temp)):
+            temp[i] =round(temp[i]/9,2) 
+        rchDict={"name":sp,"data":temp}    
+    if types ==3:
+        temp = muiltyArray(rch_base3,rch_x)
+        for i in range(0,len(temp)):
+            temp[i] =round(temp[i]/9,2) 
+        rchDict={"name":sp,"data":temp}
+    
+    return rchDict
+
+# River baseline data
+def riverBaseResult(riverMatrix,sp):   
+    trump = getmuiltyModelparam(sp)  
+    river_x = trump[2]
+    riverDict={"name":sp,"data":muiltyArray(riverMatrix,river_x)}
+    return riverDict
+
+
+# Pumping-well baseline data
+def pumpBaseResult(pumpMatrix,sp):
+    trump = getmuiltyModelparam(sp)
+    pump_x =trump[3]
+    pumpDict={"name":sp,"data":muiltyArray(pumpMatrix[1],pump_x)}
+    return pumpDict
+    
+
+def muiltyArray(array,scale):
+    result =[]
+    for item in array:
+        x= round(float(item) * scale,2)
+        result.append(x)
+    return result
+
+
+def convertColumnData(array):
+    result =[]
+    new_list=[]
+    for i in range(len(array)):
+        if i!= 0:         
+             data =  transToNum(array[i])
+             result.append(data) 
+    for index in range(len(result)):
+        if index % 3 == 0:
+            new_list.append(result[index])     
+    return new_list   
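+
+# Worked example: a column ["header", "1.0", "2.0", "3.0", "4.0"] drops the
+# first entry and keeps every third remaining value, giving [1.0, 4.0].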
+
+def transToNum(strs):
+    data = 0
+    try:
+        data=  round(float(strs),2) 
+        return data
+    except ValueError:
+        return 0    
+  
+     
+# Get the column index of a prediction scenario; scenario values are read column-wise from the ini files
+def muiltyModelIndex(name):
+    models= muiltyModelList()
+    indexs = models.index(name)
+    return indexs
+
+# List of prediction scenario models
+def  muiltyModelList():
+    models=["SP0-0","SP1-1","SP1-2","SP1-3","SP2-1","SP2-2","SP2-3","SP3-1",
+            "SP3-2","SP3-4","SP3-5","SP3-6","SP3-7","SP4-1","SP4-7"]
+    return models
+    
+   
+
+# NumPy helper: get one column of a matrix
+def get_column(matrix, column_number):
+    column = matrix[:, column_number]
+    return column
+
+# Tuple fields: 1 = rain type, 2 = rain multiplier, 3 = river multiplier, 4 = pumping multiplier
+def getmuiltyModelparam(sp):
+    dicts={
+        "SP0-0":(1,1,1,1),
+        "SP1-1":(2,1,1,1),
+        "SP1-2":(3,1,1,1),
+        "SP1-3":(3,1.2,1,1),     
+        "SP2-1":(1,1,2,1),
+        "SP2-2":(1,1,5,1),
+        "SP2-3":(1,1,10,1),     
+        "SP3-1":(1,1,1,0.25),
+        "SP3-2":(1,1,1,0.5),
+        "SP3-4":(1,1,1,0),    
+        "SP3-5":(1,1,1,0.4),
+        "SP3-6":(1,1,1,0.3),
+        "SP3-7":(1,1,1,0.6),
+        
+        "SP4-1":(1,1,2,0.5),
+        "SP4-7":(3,1.2,10,0)}
+    return dicts[sp]
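+
+# Usage sketch: unpack one scenario tuple into its four factors.
+# rain_type, rain_x, river_x, pump_x = getmuiltyModelparam("SP2-2")
+# # -> (1, 1, 5, 1): baseline rain, 5x river leakage, baseline pumping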
diff --git a/DataTask.py b/DataTask.py
new file mode 100644
index 0000000..686b443
--- /dev/null
+++ b/DataTask.py
@@ -0,0 +1,325 @@
+import cx_Oracle
+import pymssql
+from datetime import datetime, timedelta
+import numpy as np
+# select * from swzdh.rain;
+# select * from swzdh.river;
+# select * from swzdh.gw;
+
+
+
+# Mayu rainfall station
+mayu_rainfall="30747850"
+
+# Longjiazhuang hydrologic station
+ljz_swz="30700450"
+
+# LQWB (Longquan) / ZSSC (reclaimed-water plant) / WTY (Wutongyuan) / LSH (Lianshihu) / HZZ (Houzhuangzi) / GC (Gucheng)
+arr1=["30773702","30769360","30769280","30567304","30769320","30567303"]
+
+
+haidian_rainfall="xxxx"
+
+# SXC (Shuangxingcun 93) / XM1 (Yiheyuan West Gate 1) / XM2 (Yiheyuan West Gate 2) / SYSC (Water Plant No. 3, 336) / SJYY (Sijiyuyuan 2) / BW (Beiwu)
+arr2=["30566324","30565080","30564400","30566335","30564840","30564880"]
+
+# Collect the input data for the 15-day Yongding River forecast
+def get_ydh15_real_data():
+    result =[]
+    
+    current_time = datetime.now()
+    start_time =  current_time- timedelta(days=60)
+    times=[]
+    # time series
+    for i in range(75):
+        tmp = start_time +  timedelta(days=i)
+        times.append(tmp.strftime("%Y-%m-%d"))
+    
+    # start and end times
+    start_str = start_time.strftime("%Y-%m-%d")
+    end_str= current_time.strftime("%Y-%m-%d")    
+    
+   
+    
+    # Mayu rainfall data
+    mayu_data = get_data("rain",mayu_rainfall,start_str,end_str)
+    mayu_dict={}  
+    for i in range(len(mayu_data)):
+        time = mayu_data[i]["time"]
+        value =  mayu_data[i]["value"]
+        if time not in mayu_dict:
+            mayu_dict[time] = value
+          
+       
+    # Longjiazhuang stage data
+    ljz_data =  get_data("river",ljz_swz,start_str,end_str)
+    ljz_dict ={}
+    for i in range(len(ljz_data)):
+        time = ljz_data[i]["time"]
+        value =  ljz_data[i]["value"]
+        if time not in ljz_data:
+            ljz_dict[time] = value
+    
+    mayu_value = []
+    ljz_value = []
+
+    # align both series onto the date axis, filling gaps with 0
+    for tmp in times:
+        if tmp in mayu_dict:
+            mayu_value.append(mayu_dict[tmp])
+        else:
+            mayu_value.append(0)
+
+        if tmp in ljz_dict:
+            ljz_value.append(ljz_dict[tmp])
+        else:
+            ljz_value.append(0)
+
+    
+    result.append(times) 
+    result.append(mayu_value) 
+    result.append(ljz_value) 
+    
+    # one aligned series per monitoring well
+    for k in range(len(arr1)):
+        data = get_data("gw", arr1[k], start_str, end_str)
+        dictx = {}
+        tmp_arr = []
+        for item in data:
+            time = item["time"]
+            value = item["value"]
+            if time not in dictx:
+                dictx[time] = value
+
+        for tmp in times:
+            if tmp in dictx:
+                tmp_arr.append(dictx[tmp])
+            else:
+                tmp_arr.append(0)
+
+        result.append(tmp_arr)
+        
+    np_arr = np.asarray(result)
+    # transpose so each row is one day
+    np_result = np_arr.T
+    
+    return np_result
+
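+# The returned matrix has one row per day (75 = 60 observed + 15 forecast
+# days) and 9 columns: date, Mayu rainfall, Longjiazhuang flow, then the six
+# wells in the order of arr1. A hedged consumption sketch (column layout as
+# assumed above):
+#     data = get_ydh15_real_data()
+#     dates, rainfall = data[:, 0], data[:, 1].astype(float)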
+
+# Fetch the observed series needed for the 15-day Yuquanshan forecast
+def getyqs15_real_data():
+
+    result = []
+
+    current_time = datetime.now()
+    start_time = current_time - timedelta(days=60)
+    times = []
+    # daily date axis: 60 days of history plus 15 forecast days
+    for i in range(75):
+        tmp = start_time + timedelta(days=i)
+        times.append(tmp.strftime("%Y-%m-%d"))
+
+    # start / end of the observed window
+    start_str = start_time.strftime("%Y-%m-%d")
+    end_str = current_time.strftime("%Y-%m-%d")
+
+    # Haidian rainfall, keyed by date
+    hd_data = get_data("rain", haidian_rainfall, start_str, end_str)
+    hd_dict = {}
+    for item in hd_data:
+        time = item["time"]
+        value = item["value"]
+        if time not in hd_dict:
+            hd_dict[time] = value
+
+    # Longjiazhuang river flow, keyed by date
+    ljz_data = get_data("river", ljz_swz, start_str, end_str)
+    ljz_dict = {}
+    for item in ljz_data:
+        time = item["time"]
+        value = item["value"]
+        if time not in ljz_dict:
+            ljz_dict[time] = value
+    
+    hd_value = []
+    ljz_value = []
+
+    # align both series onto the date axis, filling gaps with 0
+    for tmp in times:
+        if tmp in hd_dict:
+            hd_value.append(hd_dict[tmp])
+        else:
+            hd_value.append(0)
+
+        if tmp in ljz_dict:
+            ljz_value.append(ljz_dict[tmp])
+        else:
+            ljz_value.append(0)
+
+    
+    result.append(times) 
+    result.append(hd_value) 
+    result.append(ljz_value) 
+    
+    # one aligned series per monitoring well
+    for k in range(len(arr2)):
+        data = get_data("gw", arr2[k], start_str, end_str)
+        dictx = {}
+        tmp_arr = []
+        for item in data:
+            time = item["time"]
+            value = item["value"]
+            if time not in dictx:
+                dictx[time] = value
+
+        for tmp in times:
+            if tmp in dictx:
+                tmp_arr.append(dictx[tmp])
+            else:
+                tmp_arr.append(0)
+
+        result.append(tmp_arr)
+        
+    np_arr = np.asarray(result)
+    # transpose so each row is one day
+    np_result = np_arr.T
+    
+    return np_result
+
+
+
+####################################################
+
+def get_data(types, num, start_time, end_time):
+
+    if types == 'river':
+        return river_list(start_time, end_time, num)
+    if types == 'rain':
+        return rain_list(start_time, end_time, num)
+    if types == 'gw':
+        return gw_list(start_time, end_time, num)
+    # unknown type
+    return []
+    
+# River flow from the hydrological station (Oracle)
+def river_list(start_time,end_time,STCD):
+    # open a connection to the Oracle database
+    connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh') 
+    # create a cursor
+    cursor = connection.cursor()
+
+    sql = """  
+    SELECT * FROM swzdh.river  
+    WHERE STCD = :STCD and tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD') AND TO_DATE(:end_time, 'YYYY-MM-DD')  order by tm ASC
+    """
+    res =[]
+    try:
+        cursor.execute(sql, {'start_time': start_time, 'end_time': end_time,'STCD':STCD}) 
+        
+        column_names = [row[0] for row in cursor.description]  
+        print("columns:", column_names)  
+    
+        # fetch the full result set
+        result = cursor.fetchall()
+        
+        # build one dict per row
+        for row in result:     
+            times = row[2]
+            date_str = times.strftime("%Y-%m-%d")
+            dicts={"time":date_str,"value":row[4],"stname":row[1]}
+            res.append(dicts)
+          
+    except Exception as e:
+        print("Error occurred: ", str(e))
+    finally:
+        # close cursor and connection
+        cursor.close()
+        connection.close()
+    return res
+
+# Rainfall (aggregated to daily totals) from the rain gauge (Oracle)
+def rain_list(start_time,end_time,STCD):
+    # open a connection to the Oracle database
+    connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh') 
+    # create a cursor
+    cursor = connection.cursor()
+
+    sql = """  
+    SELECT * FROM swzdh.rain  
+    WHERE STCD = :STCD and tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD') AND TO_DATE(:end_time, 'YYYY-MM-DD')  order by tm ASC
+    """
+    res =[]
+    try:
+        cursor.execute(sql, {'start_time': start_time, 'end_time': end_time,'STCD':STCD}) 
+        
+        column_names = [row[0] for row in cursor.description]  
+        print("columns:", column_names)  
+    
+        # fetch the full result set
+        result = cursor.fetchall()
+        print(result)
+        
+        date_str_arr=[]
+        date_str_dict={}
+        # aggregate the readings into daily totals
+        name =""
+        for row in result:  
+            name=  row[1]
+            times = row[2]
+            date_str = times.strftime("%Y-%m-%d")
+            R =  row[3]
+            if date_str in date_str_dict:
+                date_str_dict[date_str] = date_str_dict[date_str] + R
+            else:
+                date_str_dict[date_str] = R
+                date_str_arr.append(date_str)
+        for item in date_str_arr:
+           value = round(date_str_dict[item],2)
+           temp ={"time":item,"value":value,"stname":name}
+           res.append(temp)
+        
+    except Exception as e:
+        print("Error occurred: ", str(e))
+    finally:
+        # close cursor and connection
+        cursor.close()
+        connection.close()
+    return res 
+
+    
+# Groundwater monitoring wells; two views: V_WT_YRS and V_Z_YRS (SQL Server)
+def gw_list(start_time,end_time,STCD):
+
+    conn = pymssql.connect(server='192.168.44.66',
+                           user='xsyrs', 
+                           password='gws@xsyrs2024', 
+                           database='DB_DXS',
+                           as_dict=True)
+    
+    cursor = conn.cursor()
+    res =[]
+    try:
+        sql = "SELECT * FROM V_Z_YRS where STCD = '" +STCD +"' and TM >= '"+start_time +"' and TM <= '"+end_time +"' order by TM ASC" 
+        cursor.execute(sql)
+        result = cursor.fetchall()
+    
+        for row in result:
+            times = row["TS"]
+            date_str = times.strftime("%Y-%m-%d")
+            value =  float(row["Z"])
+            bd = float(row["BD"])
+            dicts={"time":date_str,"value":value,"bd":bd}
+            res.append(dicts)
+    
+        
+    except Exception as e:
+        print("Error occurred:", str(e))
+    finally:
+        cursor.close()
+        conn.close()
+    return res
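+
+# river_list and rain_list above differ only in the table queried and in how
+# the value column is post-processed. A minimal shared query helper is
+# sketched below (assumes the same swzdh schema and credentials; not wired in
+# yet):
+def query_swzdh(table, STCD, start_time, end_time):
+    connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh')
+    cursor = connection.cursor()
+    # table only ever comes from trusted literals ('river' / 'rain')
+    sql = ("SELECT * FROM swzdh." + table +
+           " WHERE STCD = :STCD AND tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD')"
+           " AND TO_DATE(:end_time, 'YYYY-MM-DD') ORDER BY tm ASC")
+    try:
+        cursor.execute(sql, {'STCD': STCD, 'start_time': start_time,
+                             'end_time': end_time})
+        return cursor.fetchall()
+    finally:
+        cursor.close()
+        connection.close()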
\ No newline at end of file
diff --git a/DataTransf.py b/DataTransf.py
new file mode 100644
index 0000000..903a6cf
--- /dev/null
+++ b/DataTransf.py
@@ -0,0 +1,526 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 20 14:44:36 2024
+
+@author: ZMK
+"""
+import numpy as np
+import flopy
+from openpyxl import load_workbook
+import os
+import Base as base
+import CalHead
+
+# 1-based row ranges of the river segments inside the WEL file
+riv_seg={0:[454,479],1:[480,505],2:[506,527],3:[528,562]}
+riv_seg_celles=[26,26,22,35]
+# 1-based row ranges of the boundary segments inside the WEL file
+ibound_seg={0:[1,86],1:[87,111],2:[112,142],3:[143,170],4:[171,240],
+             5:[241,282],6:[283,354],7:[355,393],8:[394,436],9:[437,453]}
+
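+# The update functions below walk the WEL rows with a 1-based counter and test
+# which (start, end) range it falls in. An equivalent standalone lookup, shown
+# here for clarity (pure function, no side effects):
+def seg_index(count, seg_dict):
+    for idx, (start, end) in seg_dict.items():
+        if start <= count <= end:
+            return idx
+    return None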
+
+
+def base_excel(model_name,types):
+    if types == '降雨':      # rainfall
+        return rch_excel(model_name)
+    if types == '河流':      # river
+        return river_excel(model_name)
+    if types == '开采量':    # pumping
+        return well_excel(model_name)
+    if types == '边界':      # boundary
+        return ibound_excel(model_name)
+
+    return []
+         
+         
+# Rainfall Excel
+def rch_excel(model_name):
+    paths = base.model_dir + model_name + "\\降雨.xlsx"
+    data=[]
+    if not os.path.exists(paths):
+        return data      
+    wb = load_workbook(filename = paths)
+    ws = wb[wb.sheetnames[0]]
+   
+    for row in ws.iter_rows():
+        tmp =[]
+        for cell in row:
+          tmp.append(cell.value)
+        data.append(tmp)
+    wb.close()
+    # analyse the precipitation and update the model
+    rch_analysis(data,model_name)
+    return data
+
+def rch_analysis(data_array,model_name):
+      
+    periods =CalHead.get_model_period(model_name)
+    # number of stress periods
+    periods_len= len(periods) 
+    
+    array =  np.asarray(data_array)
+    fid= array[1:17,2]
+    params =  array[1:17,3]
+    float_params = np.asarray(params,dtype=float)
+    # take rows 1-16, columns 4..col_last
+    col_last = 4 + periods_len
+    data = array[1:17,4:col_last]
+    float_data = np.asarray(data,dtype=float)
+ 
+    for i in range(0,len(float_data)):
+        for j in range(0,len(float_data[i])):
+            float_data[i][j] =  float_data[i][j] * float_params[i] /30/100 
+            
+    rch_update(float_data,fid,model_name,periods_len)
+
+# Update the model's RCH data
+def rch_update(float_data,fids,model_name,periods_len):
+
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return "本模型为验证模型，不允许修改！"
+    model_ws = base.model_dir + model_name
+    mx = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+    
+    areas= base.getAreas()
+    
+    for per in range(periods_len):
+        # RCH array for one stress period
+        item = mx.rch.rech.__getitem__(kper=per)
+        array2d = item.get_value() 
+        
+        per_data = float_data[:,per]
+        print(per_data)
+        
+        for i in range(0,len(per_data)):           
+            data = round(float(per_data[i]),8)         
+            fid = fids[i]
+            tuples = areas[fid]  
+            for entity in tuples:
+                x = entity[0]
+                y = entity[1]
+                array2d[x][y] = data
+        
+        mx.rch.rech.__setitem__(key=per, value=array2d)
+        
+     
+    rch = flopy.modflow.ModflowRch(mx,nrchop=mx.rch.nrchop,
+                                        ipakcb=mx.rch.ipakcb,
+                                        rech=mx.rch.rech,
+                                        irch =mx.rch.irch)
+    rch.write_file(check=False)
+
+
+# Parse the river Excel
+def river_excel(model_name):
+    paths = base.model_dir + model_name + "\\河流.xlsx"
+    data=[]
+    if not os.path.exists(paths):
+        return data      
+    wb = load_workbook(filename = paths)
+    ws = wb[wb.sheetnames[0]]
+   
+    for row in ws.iter_rows():
+        tmp =[]
+        for cell in row:
+          tmp.append(cell.value)
+        data.append(tmp)
+    wb.close()
+    result =[]
+    for i in range(1,len(data)):
+        result.append(data[i])
+        
+    river_analysis(result,model_name)
+    return result
+
+# Analyse the river data
+def river_analysis(data_array,model_name):
+    
+    periods =CalHead.get_model_period(model_name)
+    periods_len= len(periods)
+    
+    row_last= 1 + periods_len
+    array =  np.asarray(data_array)
+    data = array[1:row_last,1:2]
+    data2 =  array[1:row_last,1:4] 
+
+    merge_array = np.concatenate((data,data2),axis=1)
+   
+    params =  array[1:row_last,5:9]
+
+    float_data = np.asarray(merge_array,dtype=float)  
+    float_params = np.asarray(params,dtype=float)
+  
+    for i in range(0,len(float_data)):
+        for j in range(0,len(float_data[i])):
+              temp = round(float_data[i][j] * float_params[i][j] *30*86400 ,4) 
+              last = round(temp/riv_seg_celles[j]/30,4)
+              float_data[i][j]=last
+    print(float_data)          
+    river_update(float_data,model_name,periods_len)
+    
+
+# Update the river data in the WEL file
+def river_update(float_data,model_name,periods_len):
+  
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return "本模型为验证模型，不允许修改！"
+    
+    model_ws = base.model_dir + model_name
+    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+ 
+    # rebuild the WEL stress period data, e.g.
+    #     lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
+    lrcq = {}
+   
+    for per in range(periods_len):
+      
+        wel = ml.wel.stress_period_data.__getitem__(kper=per)  
+
+        # rows for this stress period
+        array2d = []
+        count = 1
+        for Layer, Row, Column, Q in wel:
+            value = Q
+            # river rows get the per-period segment value
+            for seg, (start, end) in riv_seg.items():
+                if start <= count <= end:
+                    value = float_data[per][seg]
+                    break
+            array2d.append([Layer, Row, Column, value])
+            count += 1
+            
+        lrcq[per] = array2d 
+
+
+    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
+                                  dtype=ml.wel.dtype,
+                                  options=ml.wel.options,
+                                  stress_period_data=lrcq)
+    ml.write_input()
+    
+    
+
+# Parse the pumping (extraction) Excel
+def well_excel(model_name):
+    paths = base.model_dir + model_name + "\\开采量.xlsx"
+    data=[]
+    if not os.path.exists(paths):
+        return data      
+    wb = load_workbook(filename = paths)
+
+    ws = wb[wb.sheetnames[0]]
+   
+    for row in ws.iter_rows():
+        tmp =[]
+        for cell in row:
+          tmp.append(cell.value)
+        data.append(tmp)
+    wb.close()
+    result =[]
+    for i in range(1,len(data)):
+        result.append(data[i])
+    well_analysis(result,model_name)
+    return result
+
+
+# Analyse the pumping data
+def well_analysis(data_array,model_name): 
+    
+    periods =CalHead.get_model_period(model_name)
+    periods_len= len(periods)
+    row_last= 1 + periods_len
+    
+    array =  np.asarray(data_array)
+    data = array[1:row_last,1:5]
+    float_data= np.asarray(data,dtype=float)
+    
+    # transpose: one row per district
+    zz_array = float_data.transpose()
+    
+    # expand the four district rows to the 130 street rows they cover
+    zz_data = []
+    for i in range(50):
+        zz_data.append(zz_array[0])
+    for i in range(49):
+         zz_data.append(zz_array[1])
+    for i in range(18):
+         zz_data.append(zz_array[2])
+    for i in range(12):
+         zz_data.append(zz_array[3])
+    zz_data.append(zz_array[0])
+
+    np_data = np.asarray(zz_data,dtype=float)
+
+    well_scale = np.loadtxt(base.well_scale_path, dtype=str)
+    float_scale= np.asarray(well_scale,dtype=float)
+    
+    # pumping rate = district monthly extraction * street ratio * 10000 / (wells in street * 30)
+    for i in range(0,len(np_data)):
+        for j in range(0,len(np_data[i])):
+            tmp = np_data[i][j] * float_scale[i][0]*10000/( float_scale[i][1] *30)
+            np_data[i][j] = round(tmp,4)
+    
+    well_update(np_data,model_name,periods_len)
+    
+
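+# Worked example of the formula above (illustrative numbers): a district
+# extracting 3.6 (10^4 m^3/month) over a street with ratio 0.25 and 10 wells
+# gives 3.6 * 0.25 * 10000 / (10 * 30) = 30.0 m^3/day per well.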
+# Update the pumping rates in the WEL file
+def well_update(np_data,model_name,periods_len):
+ 
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return "本模型为验证模型，不允许修改！"
+    
+    model_ws = base.model_dir + model_name
+    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+    
+    # rebuild the WEL stress period data, e.g.
+    #     lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
+    lrcq = {}
+    for per in range(periods_len):
+        wel = ml.wel.stress_period_data.__getitem__(kper=per)
+        
+        per_data = np_data[:,per]
+ 
+        # rows for this stress period
+        array2d = []
+  
+        # the first 562 entries in the WEL file are not pumping wells;
+        # each entry is (Layer, Row, Column, Q)
+        for i in range(0,562):
+             array = [wel[i][0],wel[i][1], wel[i][2],wel[i][3]]
+             array2d.append(array)
+             
+        for i in range(562,len(wel)):
+            indexid = i-562
+            update_data=per_data[indexid]
+            array = [wel[i][0],wel[i][1], wel[i][2],update_data]
+            array2d.append(array)
+                    
+        lrcq[per] = array2d 
+
+    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,dtype=ml.wel.dtype,
+                                  options=ml.wel.options,
+                                  stress_period_data=lrcq)
+    ml.write_input()
+    
+
+
+   
+# Parse the boundary Excel
+def ibound_excel(model_name):
+    paths = base.model_dir + model_name + "\\边界.xlsx" 
+    data=[]
+    if not os.path.exists(paths):
+        return data      
+    wb = load_workbook(filename = paths)
+
+    ws = wb[wb.sheetnames[0]]
+   
+    for row in ws.iter_rows():
+        tmp =[]
+        for cell in row:
+          tmp.append(cell.value)
+        data.append(tmp)
+    wb.close()
+    result =[]
+    for i in range(1,len(data)):
+        result.append(data[i])
+    
+    np_array =  ibound_analysis(result,model_name)
+    
+    # array of rows for page display
+    view_data = ibound_view_data(np_array)
+    
+    periods =CalHead.get_model_period(model_name)
+    periods_len= len(periods)
+    
+    ibound_update(np_array,model_name,periods_len)
+    
+    return view_data    
+
+
+
+
+# Update the boundary flows in the WEL file
+def ibound_update(np_array,model_name,periods_len):
+   
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return "本模型为验证模型，不允许修改！"
+    
+    model_ws = base.model_dir + model_name
+    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+
+    #寰幆璁剧疆wel鏂囦欢锛屾洿鏂版暟鎹�
+    #     lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
+    lrcq = {}
+    for per in range(periods_len):
+        wel = ml.wel.stress_period_data.__getitem__(kper=per)
+        per_data = np_array[:,per]
+
+        # rows for this stress period
+        array2d = []
+ 
+        count = 1
+        for Layer, Row, Column, Q in wel:
+            value = Q
+            # boundary rows get the per-period segment value
+            for seg, (start, end) in ibound_seg.items():
+                if start <= count <= end:
+                    value = per_data[seg]
+                    break
+            array2d.append([Layer, Row, Column, value])
+            count += 1
+            
+        lrcq[per] = array2d 
+
+    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
+                                  dtype=ml.wel.dtype,
+                                  options=ml.wel.options,
+                                  stress_period_data=lrcq)
+    ml.write_input()
+   
+    
+    
+# Analyse the boundary data
+def ibound_analysis(data_array,model_name):  
+    array =  np.asarray(data_array)
+    data = array[1:10,2:14]
+    # areas: Datai, Nanxinfang, Shangweidian, Sanjiadian, Mayu, Wenquan, Haidian, Yangfang, Shahe
+    float_data= np.asarray(data,dtype=float)
+    
+    # inflow 1-1 (rows 1-6): Datai, Nanxinfang, Shangweidian, Sanjiadian, Mayu, Wenquan
+    result =[]
+    data1= float_data[[0,1,2,3,4,5]]
+   
+    sum1 = data1.sum(axis = 0)
+  
+    result.append(sum1)
+    
+    # inflow 1-2: Shangweidian
+    data2= float_data[2]
+
+    result.append(data2)
+    # inflow 1-3: Datai
+    data3 = float_data[0]
+    result.append(data3)
+     
+    # outflow 1-1: Datai, Nanxinfang
+    data4 =  float_data[[0,1]]
+    sum4 =  data4.sum(axis = 0)
+    result.append((-sum4))
+    # outflow 1-2: Datai
+    data5 =  float_data[0]
+    result.append((-data5))
+    
+    # inflow 3-1: Datai
+    data6 =  float_data[0]
+    result.append(data6)
+     
+    # inflow 3-2: Datai, Nanxinfang, Shangweidian
+    data7 =  float_data[[0,1,2]]
+    sum7 = data7.sum(axis = 0)
+    result.append(sum7)
+     
+    # inflows 3-3, 3-4 and 3-5 reuse the same sum
+    result.append(sum7)  
+    result.append(sum7)
+    result.append(sum7)
+  
+    np_data = np.asarray(result,dtype=float)
+   
+    np_data = np.around(np_data, decimals=2)
+    
+    return np_data
+ 
+ 
+def ibound_view_data(np_data):
+   
+    names=['流入1-1','流入1-2','流入1-3','流出1-1','流出1-2',
+          '流入3-1','流入3-2','流入3-3','流入3-4','流入3-5']
+    row_sums = np.sum(np_data, axis=1)
+    row_sums= np.around(row_sums,2)
+   
+    params=[1,1,1,2.5,1,0.5,0.5,0.5,0.5,0.5]
+    cells=[86,47,31,28,50,42,72,39,43,17]
+    per=12
+    x=30
+    # input value = row sum * coefficient * cell count * 12 periods * 30 days
+    water=[]
+    for i in range(0,len(names)):
+        tmp = round( row_sums[i] * params[i] * cells[i] * per *x , 2)
+        water.append(tmp)
+    arr=[]
+    arr.append(names)
+    arr.append(row_sums)
+    arr.append(params)
+    arr.append(water)
+    str_np = np.asarray(arr,dtype=str)
+    zz= str_np.transpose()
+    
+    title =['边界','降雨量','系数','输入值']
+    result =[]
+    result.append(title)
+   
+    for item in zz:
+        result.append(item)
+    
+    result = np.asarray(result,dtype=str).tolist()
+    
+    return result
+
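+# Worked example of one display row (illustrative numbers): a boundary whose
+# 12-month rainfall sum is 600, with coefficient 0.5 and 86 cells, gets the
+# input value 600 * 0.5 * 86 * 12 * 30 = 9288000.0.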
+
+
+
+
diff --git a/GRU_zmk.py b/GRU_zmk.py
new file mode 100644
index 0000000..9ff5282
--- /dev/null
+++ b/GRU_zmk.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri May 24 15:33:12 2024
+
+@author: BDYGS
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+from sklearn.preprocessing import StandardScaler, MinMaxScaler
+from sklearn.metrics import mean_squared_error
+from sklearn.metrics import mean_absolute_error
+from torch.utils.data import TensorDataset
+from tqdm import tqdm
+
+
+
+class Config():
+    data_path = "C:\\Users\\ZMK\\Desktop\\GRU\\玉泉山井.csv"
+    timestep = 60  # length of the input time window
+    batch_size = 30  # batch size
+    feature_size = 8  # features per time step
+    hidden_size = 256  # GRU hidden size
+    output_size = 15  # forecast horizon: the next 15 days
+    num_layers = 2  # number of GRU layers
+    epochs = 100  # training epochs
+    best_loss = 0  # best loss seen so far
+    learning_rate = 0.0003
+
+
+config = Config()
+
+
+def normalization(data,label):
+    mm_x=MinMaxScaler()  # sklearn preprocessing scalers
+    mm_y=MinMaxScaler()
+    data=mm_x.fit_transform(data)  # scale data and labels to [0, 1]
+    label=mm_y.fit_transform(label)
+    return data,label,mm_y
+
+
+def split_windows(data,seq_len,output_size):
+    x=[]
+    y=[]
+    for i in range(len(data)-seq_len-1-output_size):  # shorten the range by the window and horizon
+        _x=data[i:(i+seq_len),:]
+        _y=data[(i+seq_len):(i+seq_len+output_size),2:]  # note: columns 2: are the labels
+
+        x.append(_x)
+        y.append(_y)
+    print('split_windows_i:',i)
+    print(_x.shape,_y.shape)
+    x,y=np.array(x),np.array(y)
+    print('x.shape,y.shape=\n',x.shape,y.shape)  
+    return x,y
+
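+# Shape sketch (illustrative): with 1000 input rows, timestep=60 and
+# output_size=15, the loop runs 1000-60-1-15 = 924 times, so x has shape
+# (924, 60, 8) and y has shape (924, 15, 6) (columns 2: hold the six wells).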
+
+
+
+def split_data(x,y,split_ratio):
+
+    train_size=int(len(y)*split_ratio)
+    test_size=len(y)-train_size
+
+    x_data=Variable(torch.Tensor(np.array(x)))
+    y_data=Variable(torch.Tensor(np.array(y)))
+
+    x_train=Variable(torch.Tensor(np.array(x[0:train_size])))
+    y_train=Variable(torch.Tensor(np.array(y[0:train_size])))
+    y_test=Variable(torch.Tensor(np.array(y[train_size:len(y)])))
+    x_test=Variable(torch.Tensor(np.array(x[train_size:len(x)])))
+
+    print('x_data.shape,y_data.shape,x_train.shape,y_train.shape,x_test.shape,y_test.shape:\n{}{}{}{}{}{}'
+    .format(x_data.shape,y_data.shape,x_train.shape,y_train.shape,x_test.shape,y_test.shape))
+
+    return x_data,y_data,x_train,y_train,x_test,y_test
+
+
+def nash_sutcliffe_efficiency(y_true, y_pred):
+    """
+    Nash-Sutcliffe Efficiency.
+    Parameters:
+    y_true : array-like, observed values
+    y_pred : array-like, predicted values
+    Returns:
+    nse : float, Nash-Sutcliffe Efficiency
+    """
+    return 1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2)
+
+
+
+# 1. load the time series
+
+df= pd.read_csv(config.data_path,parse_dates=["date"],index_col=[0])  
+# parse_dates turns the date column into a time index
+
+print(df.shape)
+
+data = df.iloc[:,0:8]  # all eight columns are features
+label = df.iloc[:,7] 
+data = data.values
+label = label.values.reshape(-1,1)
+
+# torch.manual_seed(7)  # fix the RNG seed for reproducibility
+
+data,label,mm_y=normalization(data,label)
+
+dataX,dataY = split_windows(data,config.timestep,config.output_size)
+
+x_data,y_data,x_train,y_train,x_test,y_test = split_data(dataX,dataY,0.8)
+
+# 5. build the training and test datasets
+train_data = TensorDataset(x_train,y_train)
+test_data = TensorDataset(x_test,y_test)
+
+
+# 6. wrap the datasets in data loaders
+train_loader = torch.utils.data.DataLoader(train_data,
+                                           config.batch_size,
+                                           False)
+
+test_loader = torch.utils.data.DataLoader(test_data,
+                                          config.batch_size,
+                                          False)
+
+
+# 7. define the GRU network
+class GRU(nn.Module):
+    def __init__(self, feature_size, hidden_size, num_layers, output_size):
+        super(GRU, self).__init__()
+        self.hidden_size = hidden_size  # hidden size
+        self.output_size = output_size
+        self.num_layers = num_layers  # number of GRU layers
+        # feature_size is the number of features per time step
+        self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8,batch_first=True)
+        # one output head per monitored well (six in total)
+        self.fc1 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc3 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc4 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc5 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc6 = nn.Linear(self.hidden_size, self.output_size)
+
+    def forward(self, x, hidden=None):
+        batch_size = x.size()[0]  # batch size
+        
+        # initialise the hidden state
+        if hidden is None:
+            h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
+        else:
+            h_0 = hidden
+            
+        # run the GRU
+        output, h_0 = self.gru(x, h_0)
+        
+        # output has shape (batch_size, timestep, hidden_size)
+        batch_size, timestep, hidden_size = output.shape  
+
+        # each head reads the last time step and predicts a 15-day horizon
+        pred1, pred2, pred3 = self.fc1(output), self.fc2(output), self.fc3(output)
+        pred1, pred2, pred3 = pred1[:, -1, :], pred2[:, -1, :], pred3[:, -1, :]
+        pred4, pred5, pred6 = self.fc4(output), self.fc5(output), self.fc6(output)
+        pred4, pred5, pred6 = pred4[:, -1, :], pred5[:, -1, :], pred6[:, -1, :]
+        pred = torch.stack([pred1, pred2, pred3,pred4, pred5, pred6], dim=2)
+        
+        return pred
+
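+# Smoke-test sketch (illustrative): each head maps the last hidden state to a
+# 15-step horizon, and stacking six heads on dim=2 yields (batch, 15, 6):
+#     m = GRU(8, 256, 2, 15)
+#     print(m(torch.randn(30, 60, 8)).shape)  # torch.Size([30, 15, 6])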
+
+model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size)  # build the network
+print(model)
+loss_function = nn.MSELoss()  # loss
+optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)  # optimizer
+
+# 8. train the model
+for epoch in range(config.epochs):
+    model.train()
+    running_loss = 0
+    train_bar = tqdm(train_loader)  # progress bar
+    for data in train_bar:
+        x_train, y_train = data  # unpack X and Y from the loader
+        optimizer.zero_grad()
+        y_train_pred = model(x_train)
+
+        loss = loss_function(y_train_pred, y_train)
+        loss.backward()
+        optimizer.step()
+
+        running_loss += loss.item()
+        train_bar.desc = "train epoch[{}/{}] loss:{:.6f}".format(epoch + 1,
+                                                                 config.epochs,
+                                                                 loss)
+
+print('Finished Training')
+
+model_name = 'GRU_YQS'
+torch.save(model.state_dict(), 'C://Users//ZMK//Desktop//GRU/{}.pth'.format(model_name))
+
+
+model.eval() 
+
+# validation: run the original data through the trained model once more
+
+df= pd.read_csv(config.data_path,parse_dates=["date"],index_col=[0])  
+# parse_dates turns the date column into a time index
+
+data = df.iloc[:,0:8]  # all eight columns are features
+label = df.iloc[:,7] 
+data = data.values
+label = label.values.reshape(-1,1)
+
+
+data,label,mm_y=normalization(data,label)
+
+dataX,dataY = split_windows(data,config.timestep,config.output_size)
+
+x_data,y_data,x_train,y_train,x_test,y_test = split_data(dataX,dataY,0.8)
+
+test_pre = model(x_data)  
+
+with pd.ExcelWriter("C:\\Users\\ZMK\\Desktop\\GRU\\GRU-YQS.xlsx", engine='openpyxl') as writer:
+    
+    for i in range(6):
+        test_pre_data = test_pre[:,0,i].data.numpy().reshape(-1,1)
+        y_test_pre = y_data[:,0,i].data.numpy().reshape(-1,1)
+        
+        print(test_pre_data.shape)
+        
+        test_pre_data_inv = mm_y.inverse_transform(test_pre_data)
+        
+        # print(test_pre_data_inv.shape)
+        y_test_inv =mm_y.inverse_transform(y_test_pre)
+        
+        
+        plt.figure(figsize=(10,5))
+        plt.plot(y_test_inv)
+        plt.plot(test_pre_data_inv)
+        plt.legend(('real', 'predict'),fontsize='15')
+        plt.show()  
+        
+        print('MAE/RMSE/NSE')
+        print(mean_absolute_error(y_test_inv, test_pre_data_inv))
+        print(np.sqrt(mean_squared_error(y_test_inv, test_pre_data_inv)))
+        print(nash_sutcliffe_efficiency(y_test_inv, test_pre_data_inv))
+        
+        y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}'])
+        test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'test Node {i+1}'])
+        
+        # save observed and predicted series to separate worksheets
+        y_test_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False)
+        test_pre_data_inv.to_excel(writer, sheet_name=f'test Node {i+1}', index=False)
+
+
+
+
diff --git "a/GRU_zmk_pre - \345\211\257\346\234\254.py" "b/GRU_zmk_pre - \345\211\257\346\234\254.py"
new file mode 100644
index 0000000..010b70f
--- /dev/null
+++ "b/GRU_zmk_pre - \345\211\257\346\234\254.py"
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun May 26 02:15:11 2024
+
+@author: BDYGS
+"""
+
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+# import tushare as ts
+from sklearn.preprocessing import StandardScaler, MinMaxScaler
+from sklearn.metrics import mean_squared_error
+from sklearn.metrics import mean_absolute_error
+from torch.utils.data import TensorDataset
+from tqdm import tqdm
+
+
+
+
+class Config():
+   # data_path = "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井.csv"
+    timestep = 60  # length of the input time window
+    batch_size = 30  # batch size
+    feature_size = 8  # features per time step
+    hidden_size = 256  # GRU hidden size
+    output_size = 15  # forecast horizon: the next 15 days
+    num_layers = 2  # number of GRU layers
+    epochs = 100  # training epochs
+    best_loss = 0  # best loss seen so far
+    learning_rate = 0.0003
+   # model_name = 'GRU_ZMK'
+   # save_path = 'C://Users//ZMK//Desktop//GRU//{}.pth'.format(model_name)  # best-model path
+
+config = Config()
+
+
+def normalization(data,label):
+    mm_x=MinMaxScaler()  # sklearn preprocessing scalers
+    mm_y=MinMaxScaler()
+    data=mm_x.fit_transform(data)  # scale data and labels to [0, 1]
+    label=mm_y.fit_transform(label)
+    return data,label,mm_y
+
+
+def split_windows(data,seq_len,output_size):
+    x=[]
+    y=[]
+    for i in range(len(data)-seq_len-1-output_size):  # shorten the range by the window and horizon
+        _x=data[i:(i+seq_len),:]
+        _y=data[(i+seq_len):(i+seq_len+output_size),2:]  # note: columns 2: are the labels
+
+        x.append(_x)
+        y.append(_y)
+    print('split_windows_i:',i)
+    print(_x.shape,_y.shape)
+    x,y=np.array(x),np.array(y)
+    print('x.shape,y.shape=\n',x.shape,y.shape)  
+    return x,y
+
+# Non-overlapping windows for long-horizon prediction: the window start moves
+# forward by output_size each step
+def split_windows_long(data,seq_len,output_size):
+    
+    print(len(data))
+    x=[]
+    y=[]
+    for i in range(int(len(data)/output_size)-4):
+        a = i*output_size
+        _x=data[a:a+seq_len,:]
+        _y=data[a+seq_len:a+seq_len+output_size,2:]  # note: columns 2: are the labels
+        x.append(_x)
+        y.append(_y)
+    print('split_windows_i:',i)
+    x,y=np.array(x),np.array(y)
+    print('x.shape,y.shape=\n',x.shape,y.shape)  
+    return x,y
+
+
+def nash_sutcliffe_efficiency(y_true, y_pred):
+    """
+    Nash-Sutcliffe Efficiency.
+    Parameters:
+    y_true : array-like, observed values
+    y_pred : array-like, predicted values
+    Returns:
+    nse : float, Nash-Sutcliffe Efficiency
+    """
+    return 1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2)
+
+
+
+# define the GRU network (same architecture as in GRU_zmk.py)
+class GRU(nn.Module):
+    def __init__(self, feature_size, hidden_size, num_layers, output_size):
+        super(GRU, self).__init__()
+        self.hidden_size = hidden_size  # hidden size
+        self.output_size = output_size
+        self.num_layers = num_layers  # number of GRU layers
+        # feature_size is the number of features per time step
+        self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8,batch_first=True)
+        # one output head per monitored well (six in total)
+        self.fc1 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc3 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc4 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc5 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc6 = nn.Linear(self.hidden_size, self.output_size)
+
+    def forward(self, x, hidden=None):
+        batch_size = x.size()[0]  # batch size
+        
+        # initialise the hidden state
+        if hidden is None:
+            h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
+        else:
+            h_0 = hidden
+            
+        # run the GRU
+        output, h_0 = self.gru(x, h_0)
+        
+        # output has shape (batch_size, timestep, hidden_size)
+        batch_size, timestep, hidden_size = output.shape  
+
+        # each head reads the last time step and predicts a 15-day horizon
+        pred1, pred2, pred3 = self.fc1(output), self.fc2(output), self.fc3(output)
+        pred1, pred2, pred3 = pred1[:, -1, :], pred2[:, -1, :], pred3[:, -1, :]
+        pred4, pred5, pred6 = self.fc4(output), self.fc5(output), self.fc6(output)
+        pred4, pred5, pred6 = pred4[:, -1, :], pred5[:, -1, :], pred6[:, -1, :]
+        pred = torch.stack([pred1, pred2, pred3,pred4, pred5, pred6], dim=2)
+        
+        return pred
+
+model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size)  # build the network
+print(model)
+loss_function = nn.MSELoss()  # loss
+optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)  # optimizer
+
+# load the trained Yongding River weights
+model.load_state_dict(torch.load('C://Users//ZMK//Desktop//GRU//GRU_YDH.pth'))
+
+
+model.eval() 
+# prediction run
+
+df_pre = pd.read_csv("C:\\Users\\ZMK\\Desktop\\GRU\\永定河井-pre.csv",parse_dates=["date"],index_col=[0])  
+
+print(df_pre.shape)
+
+data_pre = df_pre.iloc[:,0:8]  
+
+label_pre = df_pre.iloc[:,7]  # the label only fits the scaler; it takes no part in the computation
+
+
+data_pre = data_pre.values 
+
+label_pre = label_pre.values.reshape(-1,1)
+
+data_pre,label_pre,mm_y_pre = normalization(data_pre,label_pre)
+dataX_pre,dataY_pre = split_windows_long(data_pre,config.timestep,config.output_size)
+
+dataX_pre = Variable(torch.Tensor(np.array(dataX_pre)))
+dataY_pre = Variable(torch.Tensor(np.array(dataY_pre)))
+
+print(dataY_pre.shape)
+
+test_pre = model(dataX_pre)    
+
+print(test_pre.shape)
+
+
+
+
+with pd.ExcelWriter("C:\\Users\\ZMK\\Desktop\\GRU\\GRU-pre-ydh.xlsx", engine='openpyxl') as writer:
+    
+    for i in range(6): 
+        test_pre_data = test_pre[:,:,i].data.numpy().reshape(-1,1)
+        y_test_pre = dataY_pre[:,:,i].data.numpy().reshape(-1,1)
+        
+        test_pre_data_inv = mm_y_pre.inverse_transform(test_pre_data)
+        y_test_inv =mm_y_pre.inverse_transform(y_test_pre)
+        
+        
+       # plt.figure(figsize=(10,5))
+       # plt.plot(y_test_inv)
+       # plt.plot(test_pre_data_inv)
+       # plt.legend(('real', 'predict'),fontsize='15')
+       # plt.show()  
+        
+        print('MAE/RMSE/NSE')
+        print(mean_absolute_error(y_test_inv, test_pre_data_inv))
+        print(np.sqrt(mean_squared_error(y_test_inv, test_pre_data_inv)))
+        print(nash_sutcliffe_efficiency(y_test_inv, test_pre_data_inv))
+        
+        y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}'])
+        test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'pre Node {i+1}'])
+        
+        # save observed and predicted series to separate worksheets
+        y_test_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False)
+        test_pre_data_inv.to_excel(writer, sheet_name=f'pre Node {i+1}', index=False)
+
+    
+
+
+
+
diff --git a/GRU_zmk_pre.py b/GRU_zmk_pre.py
new file mode 100644
index 0000000..d3bab52
--- /dev/null
+++ b/GRU_zmk_pre.py
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun May 26 02:15:11 2024
+
+@author: BDYGS
+"""
+
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+# import tushare as ts
+from sklearn.preprocessing import StandardScaler, MinMaxScaler
+from sklearn.metrics import mean_squared_error
+from sklearn.metrics import mean_absolute_error
+from torch.utils.data import TensorDataset
+from tqdm import tqdm
+from datetime import datetime
+
+import DataTask
+
+
+path_dir_left ="C://Users//ZMK//Desktop//GRU//"
+
+path_dir_right ="C:\\Users\\ZMK\\Desktop\\GRU\\"
+
+
+
+class Config():
+   # data_path = "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井.csv"
+    timestep = 60  # length of the input time window
+    batch_size = 30  # batch size
+    feature_size = 8  # features per time step
+    hidden_size = 256  # GRU hidden size
+    output_size = 15  # forecast horizon: the next 15 days
+    num_layers = 2  # number of GRU layers
+    epochs = 100  # training epochs
+    best_loss = 0  # best loss seen so far
+    learning_rate = 0.0003
+   # model_name = 'GRU_ZMK'
+   # save_path = 'C://Users//ZMK//Desktop//GRU//{}.pth'.format(model_name)  # best-model path
+
+config = Config()
+
+
+def normalization(data,label):
+    mm_x=MinMaxScaler()  # sklearn preprocessing scalers
+    mm_y=MinMaxScaler()
+    data=mm_x.fit_transform(data)  # scale data and labels to [0, 1]
+    label=mm_y.fit_transform(label)
+    return data,label,mm_y
+
+
+def split_windows(data,seq_len,output_size):
+    x=[]
+    y=[]
+    for i in range(len(data)-seq_len-1-output_size):  # shorten the range by the window and horizon
+        _x=data[i:(i+seq_len),:]
+        _y=data[(i+seq_len):(i+seq_len+output_size),2:]  # note: columns 2: are the labels
+
+        x.append(_x)
+        y.append(_y)
+    print('split_windows_i:',i)
+    print(_x.shape,_y.shape)
+    x,y=np.array(x),np.array(y)
+    print('x.shape,y.shape=\n',x.shape,y.shape)  
+    return x,y
+
+# Non-overlapping windows for long-horizon prediction: the window start moves
+# forward by output_size each step
+def split_windows_long(data,seq_len,output_size):
+    
+    print(len(data))
+    x=[]
+    y=[]
+    for i in range(int(len(data)/output_size)-4):
+        a = i*output_size
+        _x=data[a:a+seq_len,:]
+        _y=data[a+seq_len:a+seq_len+output_size,2:]  # note: columns 2: are the labels
+        x.append(_x)
+        y.append(_y)
+    print('split_windows_i:',i)
+    x,y=np.array(x),np.array(y)
+    print('x.shape,y.shape=\n',x.shape,y.shape)  
+    return x,y
+
+
+def nash_sutcliffe_efficiency(y_true, y_pred):
+    """
+    Nash-Sutcliffe Efficiency.
+    Parameters:
+    y_true : array-like, observed values
+    y_pred : array-like, predicted values
+    Returns:
+    nse : float, Nash-Sutcliffe Efficiency
+    """
+    return 1 - np.sum((y_true - y_pred)**2) / np.sum((y_true - np.mean(y_true))**2)
+
+
+
+# define the GRU network (same architecture as the training script)
+class GRU(nn.Module):
+    def __init__(self, feature_size, hidden_size, num_layers, output_size):
+        super(GRU, self).__init__()
+        self.hidden_size = hidden_size  # hidden size
+        self.output_size = output_size
+        self.num_layers = num_layers  # number of GRU layers
+        # feature_size is the number of features per time step
+        self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8,batch_first=True)
+        # one output head per monitored well (six in total)
+        self.fc1 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc3 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc4 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc5 = nn.Linear(self.hidden_size, self.output_size)
+        self.fc6 = nn.Linear(self.hidden_size, self.output_size)
+
+    def forward(self, x, hidden=None):
+        batch_size = x.size()[0]  # batch size
+        
+        # initialise the hidden state
+        if hidden is None:
+            h_0 = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
+        else:
+            h_0 = hidden
+            
+        # run the GRU
+        output, h_0 = self.gru(x, h_0)
+        
+        # output has shape (batch_size, timestep, hidden_size)
+        batch_size, timestep, hidden_size = output.shape  
+
+        # each head reads the last time step and predicts a 15-day horizon
+        pred1, pred2, pred3 = self.fc1(output), self.fc2(output), self.fc3(output)
+        pred1, pred2, pred3 = pred1[:, -1, :], pred2[:, -1, :], pred3[:, -1, :]
+        pred4, pred5, pred6 = self.fc4(output), self.fc5(output), self.fc6(output)
+        pred4, pred5, pred6 = pred4[:, -1, :], pred5[:, -1, :], pred6[:, -1, :]
+        pred = torch.stack([pred1, pred2, pred3,pred4, pred5, pred6], dim=2)
+        
+        return pred
+
+
+
+# Choose the trained forecast model by well (station) number
+def getModelName(well_num):
+    if well_num in DataTask.arr1:
+        return 'GRU_YDH.pth'
+    else:
+        return 'GRU_YQS.pth'
+
+
+
+# Write the fetched series to a CSV in the layout the model expects
+def write_csv(model_name , np_result,csv_path):
+    
+    if model_name =='GRU_YDH.pth':
+        df = pd.DataFrame({"date":np_result[:,0], "Myrainfall":np_result[:,1],
+                   "flowrate":np_result[:,2], "LQWB":np_result[:,3],
+                   "ZSSC":np_result[:,4], "WTY":np_result[:,5],
+                   "LSH":np_result[:,6], "HZZ":np_result[:,7],"GC":np_result[:,8] 
+                   })
+    else:
+        df = pd.DataFrame({"date":np_result[:,0], "HDrainfall":np_result[:,1],
+                   "flowrate":np_result[:,2], "SXC":np_result[:,3],
+                   "XM1":np_result[:,4], "XM2":np_result[:,5],
+                   "SYSC":np_result[:,6], "SJYY":np_result[:,7],"BW":np_result[:,8] 
+                   })
+       
+    df.to_csv(csv_path, index=False)    
+    
+    
+# Run the prediction model for the given well
+def runPredictModel(well_num):
+       
+    data = ""
+    csv_path = ""
+    excel_path = ""
+    # pick the trained model
+    model_name = getModelName(well_num)
+    
+    # Yongding River model
+    if model_name == 'GRU_YDH.pth':
+        csv_path = path_dir_right + "永定河井-pre.csv"
+        excel_path = path_dir_right + "永定河井-预测结果.xlsx"
+        data = DataTask.get_ydh15_real_data()
+    else:
+        # Yuquanshan model
+        csv_path = path_dir_right + "玉泉山井-pre.csv"
+        excel_path = path_dir_right + "玉泉山井-预测结果.xlsx"
+        data = DataTask.getyqs15_real_data()
+        
+    # dump the fetched series to CSV
+    write_csv(model_name,data,csv_path)
+    
+    model_path = path_dir_left + model_name 
+    
+    model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size)
+
+    # inference only: no loss function or optimizer is needed here
+    model.load_state_dict(torch.load(model_path))
+    model.eval() 
+      
+    
+    # path of the CSV written above, e.g.
+    # "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井-pre.csv"
+    df_pre = pd.read_csv(csv_path , parse_dates=["date"],index_col=[0])  
+
+    data_pre = df_pre.iloc[:,0:8]  
+
+    label_pre = df_pre.iloc[:,7]  # the label only fits the scaler; it takes no part in the computation
+
+    data_pre = data_pre.values 
+
+    label_pre = label_pre.values.reshape(-1,1)
+
+    data_pre,label_pre,mm_y_pre = normalization(data_pre,label_pre)
+    dataX_pre,dataY_pre = split_windows_long(data_pre,config.timestep,config.output_size)
+
+    dataX_pre = Variable(torch.Tensor(np.array(dataX_pre)))
+    dataY_pre = Variable(torch.Tensor(np.array(dataY_pre)))
+
+    test_pre = model(dataX_pre)  
+       
+    
+    with pd.ExcelWriter( excel_path ,  engine='openpyxl') as writer:
+    
+        for i in range(6): 
+            test_pre_data = test_pre[:,:,i].data.numpy().reshape(-1,1)
+            y_test_pre = dataY_pre[:,:,i].data.numpy().reshape(-1,1)
+        
+            test_pre_data_inv = mm_y_pre.inverse_transform(test_pre_data)
+            y_test_inv =mm_y_pre.inverse_transform(y_test_pre)
+        
+
+        
+            y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}'])
+            test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'pre Node {i+1}'])
+        
+            # save observed and predicted series to separate worksheets
+            y_test_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False)
+            test_pre_data_inv.to_excel(writer, sheet_name=f'pre Node {i+1}', index=False)
+
+    
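+# Usage sketch (station codes as configured in DataTask): wells in
+# DataTask.arr1 select the Yongding River model, all others Yuquanshan:
+#     runPredictModel("30773702")  # writes 永定河井-pre.csv plus the xlsx result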
+
+
+
+
diff --git a/MainAPI.py b/MainAPI.py
index 78b64b3..a1adfdf 100644
--- a/MainAPI.py
+++ b/MainAPI.py
@@ -4,21 +4,21 @@
 from flask import jsonify
 from flask import request
 from flask_cors import CORS
-import sys
 import numpy as np
-import pandas as pd
 import flopy
-import flopy.utils.binaryfile as bf
-import csv
 import time 
-from openpyxl import load_workbook
 import os 
 import shutil
+import json
 import Base as base 
 import CalHead
 import Predict
-import json
 import ModelPeriod
+import AchiveReport as achiveReport
+import BigData
+import OpenExcel
+import DataTransf
+import DataTask
 
 # Flask takes one argument, __name__, which points to the package the app lives in
 app = Flask(__name__)
@@ -33,11 +33,13 @@
 #number of river cells
 riverCellSize = 109
 
-iboundGroupSize = 5
-iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240]}
+iboundGroupSize = 10
+iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240],
+             6:[241,282],7:[283,354],8:[355,393],9:[394,436],10:[437,453]}
 
-iboundGroup3Size = 5
-iboundGroup3={1:[241,282],2:[283,354],3:[355,393],4:[394,436],5:[437,453]}
+iboundName =["瑗夸晶娴佸叆","鍖楅儴娴佸叆","涓滃寳閮ㄦ祦鍏�","涓滈儴娴佸嚭","鍗楅儴娴佸嚭",
+             "灞卞尯娴佸叆","姘稿畾娌虫柇瑁傛祦鍏�","榛勫簞楂樹附鏂娴佸叆","鍏疂灞辨柇瑁傛祦鍏�","鏄嗘槑婀栨柇瑁傚叆娴�"]
+
 
 riverGroupSize = 4
 riverGroup={1:[454,479],2:[480,505],3:[506,527],4:[528,562]}
@@ -49,7 +51,7 @@
 def getModel(model_name):
     model_ws=""
     if not model_name:
-        model_ws = "202001_202212"
+        model_ws = base.not_allowed_model
     else:
          model_ws = base.model_dir + model_name
 
@@ -60,7 +62,7 @@
 @app.route('/baseparam/', methods=['GET'])
 def baseparam():
     
-    model_name = request.args.get('model_name')   
+    model_name = request.args.get('model_name')
     ml= getModel(model_name)   
     nrclp = ml.get_nrow_ncol_nlay_nper()
     dict = {"Row": nrclp[0], "Column": nrclp[1],
@@ -71,8 +73,31 @@
     
     months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
     dict["months"]=months
+    print(jsondata)
+    if "initHeader" in jsondata:
+        dict["initHead"] = jsondata["initHeader"]
+    else:
+        dict["initHead"] = ""
     return jsonify(dict)
 
+#save the initial head
+@app.route('/saveInitHead', methods=['POST'])
+def saveInitHead():
+    jsondata = request.get_json()
+    model_name = str(jsondata['model_name'])
+    initHeader =  str(jsondata['initHead'])
+    
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return jsonify("本模型为验证模型，不允许修改！")
+    
+    jsondata= CalHead.get_model_json(model_name)
+    jsondata["initHeader"] = initHeader
+    
+    jpath = base.model_dir + model_name +"\\prediction.json"
+    with open(jpath, "w",encoding='utf-8') as outfile:
+        json.dump(jsondata, outfile,ensure_ascii=False)  
+   
+    return jsonify("保存初始水头成功！")
 
 #read the WEL file; the parameter is the stress period
 @app.route('/welList/', methods=['GET'])
@@ -97,11 +122,16 @@
 
     result_len = len(result)
     
+    ibound_segment={} 
+    
     if layerparam == '1':
              #boundary
+             
+        ibound_segment={"1":[0,85],"2":[86,110],"3":[111,141],"4":[142,169],"5":[170,239]} 
+             
         for i in range(0, 240):
             iboundarray.append(result[i])
-     #娌虫祦
+        #river
         for i in range(453, 562):
             riverarray.append(result[i])
 
@@ -114,12 +144,17 @@
             welarray.append(result[i])
             
     elif layerparam == '3':
+        
+        ibound_segment={"6":[0,41],"7":[42,113],"8":[114,152],"9":[153,195],"10":[196,212]} 
+        
         for i in range(240, 453):
             iboundarray.append(result[i])
     
+    ibounddict = {"name": "ibound", "data": iboundarray,"segment":ibound_segment}
     
-    ibounddict = {"name": "ibound", "data": iboundarray}
-    riverdict = {"name": "river", "data": riverarray}
+    river_segment={"1":[0,25],"2":[26,51],"3":[52,73],"4":[74,108]}
+    riverdict = {"name": "river", "data": riverarray,"segment":river_segment}
+    
 
     weldict = {"name": "wel", "data": welarray}
 
@@ -128,6 +163,7 @@
     data.append(ibounddict)
     data.append(weldict)
     return jsonify(data)
+
 
 #read the data of a single well
 @app.route('/wel/', methods=['GET'])
@@ -169,6 +205,9 @@
     # model_name = request.args.get('model_name')  
     model_name = str(json['model_name'])
     
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return jsonify("本模型为验证模型，不允许修改！")
+    
 
     ml= getModel(model_name)
     
@@ -199,119 +238,19 @@
             
         lrcq[per] = array2d 
 
-    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,dtype=ml.wel.dtype,
+                                 options=ml.wel.options,
+                                 stress_period_data=lrcq)
     ml.write_input()
     
     return jsonify("鏁版嵁鏇存柊瀹屾瘯锛�")
-
-
-#璇诲叆鐢靛瓙琛ㄦ牸鏂囦欢鐢熸垚wel 鏂囦欢
-@app.route('/cellFileInput', methods=['POST'])
-def cellFileInput():
-    
-    path ='C:\\Users\\ZMK\\Desktop\\寰呭彂閫乗\cell鏂囦欢.xlsx'
-    
-    data = get_cell_data(path)
-    
-    lrcq= get_cell_struct(data["excel1"],data["excel2"],data["excel3"])
-    
-    model_name = request.args.get('model_name')
-    
-    ml= getModel(model_name)
-    
-    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
-    ml.write_input()
-    
-    return jsonify("sucess")
-
-
-def get_cell_struct(excel1,excel2,excel3):
-     lrcq={}
-     
-     #鍛ㄦ湡鏁伴噺
-     period = 7
-     start_row_index = 1 
-     
-     #杈圭晫鏁版嵁 excel
-     for col in range (0,period):        
-         array =[]
-         for row in range(start_row_index, len(excel1)):
-             
-             arr = [excel1[row][2]-1,excel1[row][3]-1,excel1[row][4]-1,excel1[row][6+col]]
-             array.append(arr)
-         lrcq[col]= array
-         
-     #娌虫祦鏁版嵁 excel
-     for col in range (0,period):        
-         array =[]
-         for row in range(start_row_index, len(excel2)):
-             
-             arr = [excel2[row][2]-1,excel2[row][3]-1,excel2[row][4]-1,excel2[row][6+col]]
-             array.append(arr)
-             
-         lrcq[col].extend(array)
-         
-      #鎶芥按鏁版嵁 excel
-     for col in range (0,period):
-          
-          array =[]
-          for row in range(start_row_index, len(excel3)):
-              
-              arr = [excel3[row][1]-1,excel3[row][2]-1,excel3[row][3]-1,excel3[row][8+col]]
-              array.append(arr)
-              
-          lrcq[col].extend(array)   
-     
-     return lrcq
- 
-    
-
-#鑾峰彇cell鏂囦欢
-#file_path 鏂囦欢鐨勮矾寰�
-def get_cell_data(file_path):
-    
-    workbook = load_workbook(file_path)
-    sheetnames = workbook.get_sheet_names()
-    #read  first  sheet
-    sheet1 = workbook[sheetnames[0]]
-    sheet2 = workbook[sheetnames[1]]
-    sheet3 = workbook[sheetnames[2]]
-
-    excel1 =[]
-    excel2=[]
-    excel3=[]
-    # 閬嶅巻璇诲彇鏁翠釜宸ヤ綔琛�
-    for row in sheet1.iter_rows(values_only=True):
-        array=[]
-        for cell in row:
-            array.append(cell)
-        excel1.append(array)
-    
-    for row in sheet2.iter_rows(values_only=True):
-        array=[]
-        for cell in row:
-            array.append(cell)
-        excel2.append(array) 
-        
-    for row in sheet3.iter_rows(values_only=True):   
-        array=[]
-        for cell in row:
-            array.append(cell)
-        excel3.append(array)
-   
-    # 鍏抽棴Excel鏂囦欢
-    workbook.close()
-    data={"excel1":excel1,"excel2":excel2,"excel3":excel3}
-
-    return data
-
 
 
 #read the WEL file; the parameter is the stress period
 @app.route('/iboundList/', methods=['GET'])
 def iboundList():
 
-    return jsonify(iboundGroupSize)
+    return jsonify(iboundName)
 
 
 #grouped boundary data
@@ -360,6 +299,10 @@
     data = json['data']
     
     model_name = json['model_name']
+    
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return jsonify("本模型为验证模型，不允许修改！")
+    
     ml= getModel(model_name)
     
     index = iboundGroup[no]
@@ -392,7 +335,10 @@
             
         lrcq[per] = array2d 
 
-    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
+                                 dtype=ml.wel.dtype,
+                                 options=ml.wel.options,
+                                 stress_period_data=lrcq)
     ml.write_input()
     return jsonify("鏁版嵁鏇存柊瀹屾瘯锛�")
 
@@ -458,6 +404,9 @@
     end_index = index[1]
     model_name = json['model_name']
     
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return jsonify("本模型为验证模型，不允许修改！")
+    
     ml= getModel(model_name)
     
     periods =CalHead.get_model_period(model_name)
@@ -487,7 +436,11 @@
             
         lrcq[per] = array2d 
 
-    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+
+    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
+                                 dtype=ml.wel.dtype,
+                                 options=ml.wel.options,
+                                 stress_period_data=lrcq)
     ml.write_input()
     return jsonify("鏁版嵁鏇存柊瀹屾瘯锛�")
 
@@ -529,55 +482,14 @@
     return jsonify(result)
 
 
-#precipitation data modification
-# @app.route('/precipitationInput', methods=['POST'])
-# def precipitationInput():
-     
-#     json = request.get_json()
-#     model_name= str(json['model_name'])
-#     period = int(json['period'])
-#     #ordered stress-period list (json)
-#     data = json['data']
-#     dict = {}
-#     for i in range(len(data)):
-#         q1 = data[i]['Q1']
-#         q2 = data[i]['Q2']
-#         dict[q1] = q2
-        
-#     ml= getModel(model_name)
-    
-#     item = ml.rch.rech.__getitem__(kper=period)
-#     array2d = item.get_value()
-    
-#     count = 0
-
-#     array2d_len = len(array2d)
-
-#     for i in range(array2d_len):
-
-#         array_len = len(array2d[i])
-
-#         for j in range(array_len):
-            
-#             va = str(array2d[i][j])
-#             if va in dict:
-#                 count += 1
-#                 array2d[i][j] = float(dict[va]) 
-
-#     ml.rch.rech.__setitem__(key=period, value=array2d)
-    
-#     rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech)
-#     rch.write_file(check=False)
-#     #ml.write_input()
-
-#     return jsonify("闄嶆按鍙傛暟淇敼瀹屾瘯锛�")
-
 
 @app.route('/precipitationInput', methods=['POST'])
 def precipitationInput():
      
     json = request.get_json()
     model_name= str(json['model_name'])
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return jsonify("鏈ā鍨嬩负楠岃瘉妯″瀷,涓嶅厑璁镐慨鏀癸紒")
     period = int(json['period'])
     #ordered stress-period list (json)
     data = json['data']
@@ -597,7 +509,7 @@
         
         tuples= areas[key]
         zblen= len(tuples)
-        values = float(dict[key])
+        values = round(float(dict[key]),8) 
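+        #presumably rounded to 8 decimals to keep the recharge values at a
+        #stable precision for the rch file; the exact choice of 8 is undocumented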
         for i in range(zblen):
             x = tuples[i][0]
             y = tuples[i][1]
@@ -605,106 +517,14 @@
         
     ml.rch.rech.__setitem__(key=period, value=array2d)
     
-    rch = flopy.modflow.ModflowRch(ml, rech = ml.rch.rech)
+    rch = flopy.modflow.ModflowRch(ml,nrchop=ml.rch.nrchop,
+                                       ipakcb=ml.rch.ipakcb,
+                                       rech=ml.rch.rech,
+                                       irch =ml.rch.irch)
     rch.write_file(check=False)
     # ml.write_input()
 
     return jsonify("闄嶆按鍙傛暟淇敼瀹屾瘯锛�")
-
-#import a csv file
-@app.route('/precipitationInputFile', methods=['POST'])
-def precipitationInputFile():
-    
-    model_name = request.args.get('model_name') 
-    ml= getModel(model_name)
-    save_path = 'C:/Users/ZMK/Desktop/test1/' + "1111.xlsx"  
-    file = request.files.get('file')
-    
-    if file: 
-        file.save(save_path)
-      
-        #get the excel table data that was read
-        stations = get_station_struct(save_path) 
-        
-        #loop over the periods
-        #perd: the period variable
-        #array2d: the 2-D array for each period
-        for perd in range(0,36):
-            period = perd
-            item = ml.rch.rech.__getitem__(kper=period)
-            array2d = item.get_value()
-           
-            array2d_len = len(array2d)
-            count = 0
-            #for one period, cache every station's old->new value in a dict
-            dict = {}
-            for k in range(0,len(stations)):
-                row = stations[k]["row"]
-                column = stations[k]["column"]
-                
-                data_old = array2d[row][column]
-                data_new = stations[k]["data"][perd]
-                dict[data_old]= data_new
-                
-            
-            #loop to set each period's values
-            for i in range(array2d_len):
-                
-              array_len = len(array2d[i])
-
-              for j in range(array_len):
-                  
-                  va = str(array2d[i][j])
-                  if va in dict:
-                      array2d[i][j] = float(dict[va]) 
-
-            #set array2d back onto the corresponding period item
-            ml.rch.rech.__setitem__(key=period, value=array2d)
- 
-        rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech)
-        rch.write_file(check=False)
-        # ml.write_input() 
-        return 'File uploaded successfully'
-    else:
-        return 'Upload failed: no file selected'
-    
-    
-#get the station data and build the data structure
-#file_path: path of the file
-def get_station_struct(file_path):
-    
-    workbook = load_workbook(file_path)
-    sheetnames = workbook.get_sheet_names()
-    #read  first  sheet
-    sheet = workbook[sheetnames[0]]
-
-    array2d_excel=[]
-    # iterate over the whole worksheet
-    for row in sheet.iter_rows(values_only=True):
-        array=[]
-        for cell in row:
-            array.append(cell)
-        array2d_excel.append(array)
-    # close the Excel file
-    workbook.close()
-
-    #start column index of the data
-    data_start_index=6
-    #start row of the station info
-    start_row_index = 1
-    #store the station info
-    stations = []
-    for i in range (start_row_index,len(array2d_excel)):
-        st={"name":array2d_excel[i][1],"row":array2d_excel[i][4],"column":array2d_excel[i][5]}  
-        data=[]
-        for j in range(data_start_index,len(array2d_excel[i])):
-            cell_data = array2d_excel[i][j]
-            cell_data= cell_data/100/30*0.15
-            data.append(round(cell_data, 6))
-        st["data"]= data
-        stations.append(st)
-    
-    return stations
     
 
 #run the model
@@ -712,22 +532,30 @@
 def runModel():
     model_name = request.args.get('model_name')
     
-    msg= Predict.run_model_predict(model_name)
-    #export the csv file
-    csvpath = CalHead.exportCsV(model_name)
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return jsonify("鏈ā鍨嬩负楠岃瘉妯″瀷,涓嶅厑璁镐慨鏀癸紒")
     
-    #update the model's 3-D grid configuration
+    dicts= Predict.run_model_predict(model_name)
+    if dicts["code"] == 400:
+        return dicts["msg"]
+    #export the csv file
+    CalHead.exportCsV(model_name)
+    
+    #update the model's 3-D grid configuration
     base.updateModelConfig(model_name)
     
-    #create the model's 3-D grid
+    #create the model's 3-D grid
     filedir = base.model3d_path + model_name
     
-    print(filedir)
     if not os.path.exists(filedir):
         os.makedirs(filedir, exist_ok=True)
-        base.callModelexe()
         
-    return jsonify(msg)
+    base.callModelexe() 
+    #compute water resources and water balance
+    CalHead.run_zonebudget_bal(model_name)
+    CalHead.run_zonebudget_res(model_name)
+    
+    return jsonify(dicts["msg"])
 
 #generate the model csv file
 @app.route('/runModelCsv/', methods=['GET'])
@@ -739,23 +567,6 @@
     return jsonify(result)
 
 
-
-#initial water level info
-@app.route('/initWater/', methods=['GET'])
-def initWater():
-
-    period = request.args.get('period')
-    
-    per = int(period)
-    
-    model_name = request.args.get('model_name')
-    
-    ml= getModel(model_name)
-    item = ml.rch.rech.__getitem__(kper=per)
-    value = item.get_value()
-    t = np.array(value).tolist()
-    return jsonify(t)
-
 #create a new model
 @app.route('/saveModel/', methods=['GET'])
 def saveModel():
@@ -763,29 +574,65 @@
     modelname = request.args.get('name')
     startTime = request.args.get('startTime')
     endTime = request.args.get('endTime')
+    remark = request.args.get('remark')
     file_list = os.listdir(base.model_dir)
     for name in file_list:
         if name == modelname:
              return jsonify("妯″瀷鍚嶇О宸茬粡瀛樺湪锛屼笉鍏佽閲嶅鍒涘缓锛�")
+         
+    pers = ModelPeriod.get_months_in_range_count(startTime,endTime)
+    if pers > 60 :
+       return jsonify("妯″瀷鍒涘缓澶辫触锛屾渶澶氬彧鍏佽60涓懆鏈熺殑杩炵画棰勬祴锛�")
     
     dir = base.model_dir + modelname  
-    shutil.copytree(base.predictModel,dir) 
+    
+    if pers==12:
+        shutil.copytree(base.predictModel,dir) 
+    else:
+        #if the prediction is not exactly one year, it may span more or fewer than 12 periods
+        #first clone the 60-period base model, then fix the dis file for the real period count
+        #the wel and rch files need no modification here
+        shutil.copytree(base.predictModel60,dir) 
+        Predict.updateDisFile(modelname,pers)
+        
     
     jsondata={"model_name":modelname,"start_time":startTime,"end_time":endTime}
     predictionJson = base.model_dir + modelname +"\\prediction.json"
     with open(predictionJson, "w",encoding='utf-8') as outfile:
         json.dump(jsondata, outfile,ensure_ascii=False)
-        
+    
+    CalHead.addModelJson(modelname, startTime, endTime, remark)
     return jsonify("鍒涘缓鏂版ā鍨嬪畬姣曪紒")
 
 
 #model list
 @app.route('/ModelList/', methods=['GET'])
 def ModelList():
-
     file_list = os.listdir(base.model_dir)
     return jsonify(file_list)
 
+#model list 2
+@app.route('/ModelList2/', methods=['GET'])
+def ModelList2():
+    model_path = base.prefix  +"\\model_list.json"
+    model_lsit=""
+    with open(model_path,encoding='utf-8') as f:
+             model_lsit = json.load(f)    
+    return jsonify(model_lsit)
+
+#delete a model
+@app.route('/deleteModel/', methods=['GET'])
+def deleteModel():
+    model_name = request.args.get('model_name') 
+    if not model_name:
+         return jsonify({"code":400,"msg":"涓嶅厑璁稿垹闄ょ┖鐩綍锛�"}) 
+    if model_name == base.not_allowed_model:
+         return jsonify({"code":400,"msg":"楠岃瘉妯″瀷涓嶅厑璁稿垹闄わ紒"})
+   
+    paths = base.model_dir + model_name
+    shutil.rmtree(paths)  
+    CalHead.removeModelJson(model_name)
+    return jsonify({"code":200,"msg":"妯″瀷鍒犻櫎瀹屾瘯锛�"})
 
 #prediction scenario parameters
 @app.route('/prediction', methods=['POST'])
@@ -800,15 +647,19 @@
     predictionJson = base.model_dir + model_name +"\\prediction.json"
     with open(predictionJson, "w",encoding='utf-8') as outfile:
         json.dump(jsondata, outfile,ensure_ascii=False)
-           
-    return jsonify("淇濆瓨棰勬祴鍦烘櫙鍙傛暟瀹屾瘯锛�")
+    
+    #run the model
+    dicts = Predict.run_model(model_name)     
+    return jsonify(dicts["msg"])
+
 
 
 #prediction scenario parameters
 @app.route('/predictionparam', methods=['GET'])
 def predictionparam():
 
-    model_name = request.args.get('model_name')       
+    model_name = request.args.get('model_name')     
+    print(model_name)
     file_list = os.listdir(base.model_dir)
     if model_name not in file_list:
         return jsonify("妯″瀷涓嶅瓨鍦紒")
@@ -862,28 +713,31 @@
 def pump_importdata():
             
     model_name = request.form.get('model_name')   
-   
-    # ml= getModel(model_name)
+    types = request.form.get('type')   
     file = request.files.get('file')
+    print(types)
     
-    save_path = base.model_dir + model_name +"\\extra_cell.xlsx"
+    save_path = base.model_dir + model_name +"\\"+types+".xlsx"
+    print(save_path)
    
     if file: 
         file.save(save_path)
     
-    resultDict={"code":200,"msg":"淇濆瓨鏁版嵁瀹屾瘯锛�"}
+    data= DataTransf.base_excel(model_name,types)
+    
+    resultDict={"code":200,"msg":"淇濆瓨鏁版嵁瀹屾瘯锛�","data":data}
     return jsonify(resultDict)
 
 #observation well list
 @app.route('/obsWellList', methods=['GET'])
 def obsWellList():
     obswell= base.obs_well
-    dict =[]
-    for name , row ,column in obswell:
-        obj ={"name":name,"row":row,"column":column,"Layer":1}    
-        dict.append(obj)
+    dicts =[]
+    for wellId, name , row ,column in obswell:
+        obj ={"wellId":wellId,"name":name,"row":row,"column":column,"Layer":1}    
+        dicts.append(obj)
     
-    return jsonify(dict)
+    return jsonify(dicts)
     
  
 #observation well chart data API
@@ -892,8 +746,9 @@
     model_name = request.args.get('model_name') 
     row = request.args.get('row')  
     column = request.args.get('column')  
+    wellId = request.args.get('wellId')  
 
-    result = CalHead.obsChartdata(model_name, row, column)
+    result = CalHead.obsChartdata(wellId,model_name, row, column)
    
     return jsonify(result)
 
@@ -904,8 +759,8 @@
     base_year = request.args.get('base_year') 
     start_time = request.args.get('start_time')  
     end_time = request.args.get('end_time')  
-
-    return jsonify(Predict.predict_river_chart(base_year, start_time, end_time))
+    value = float(request.args.get('value')) 
+    return jsonify(Predict.predict_river_chart(base_year, start_time, end_time,value))
 
 #prediction page: precipitation chart
 @app.route('/predictWaterChart', methods=['GET'])
@@ -913,15 +768,57 @@
     base_year = request.args.get('base_year') 
     start_time = request.args.get('start_time')  
     end_time = request.args.get('end_time') 
-    return jsonify(Predict.predict_water_chart(base_year, start_time, end_time))
+    value = float(request.args.get('value')) 
+    return jsonify(Predict.predict_water_chart(base_year, start_time, end_time,value))
 
-@app.route('/heatmap', methods=['GET'])
-def heatmap():
+#extraction line chart
+@app.route('/predictWellChart', methods=['POST'])
+def predictWellChart():
+    
+    json = request.get_json()
+    base_year = str(json['base_year'])
+    start_time = json['start_time']
+    end_time = json['end_time']
+    data = json['data'] 
+  
+    return jsonify(Predict.predict_well_chart(base_year, start_time, end_time,data))
+
+#flow field map
+@app.route('/flowField', methods=['GET'])
+def flowField():
     model_name = request.args.get('model_name') 
-    period = request.args.get('period')  
-    data = CalHead.heatmapdata(model_name,period)
-    maximum_value = np.max(data)
-    return jsonify(np.array(data).tolist())
+    flowStartTime = int(request.args.get('flowStartTime')) 
+    flowEndTime=int(request.args.get('flowEndTime')) 
+    flowType= request.args.get('flowType')
+    # layer = int(request.args.get('layer') ) 
+    
+    pic = str(int(time.time())) +".png"   
+    outpath = base.flow_file + pic
+    dicts={}
+    if flowType=="姘翠綅":
+        dicts = achiveReport.flow_field(model_name, flowStartTime, 0,"娴佸満淇℃伅", "online", outpath)
+    if flowType=="鍙樺箙":
+        dicts = achiveReport.getFlowFieldBF(model_name,flowStartTime,flowEndTime,outpath)
+       
+    if flowType=="鍩嬫繁":
+       dicts = achiveReport.getFlowFieldDepth(model_name,flowStartTime, 0,"娴佸満淇℃伅", "online", outpath)   
+     
+    dicts["path"] = "/xishan/xinshanFlow/"+pic
+    return jsonify(dicts)
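+# illustrative request (parameter names from the route above; values are examples):
+#   GET /flowField?model_name=mymodel&flowStartTime=0&flowEndTime=11&flowType=水位
+# the response dict gains "path", the URL of the rendered PNG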
+
+
+#initial water-level flow field info
+@app.route('/initWaterFlow/', methods=['GET'])
+def initWater():
+    
+    model_name = request.args.get('model_name')  
+    layer = int(request.args.get('layer') ) 
+    pic = str(int(time.time())) +".png"
+    outpath = base.flow_file + pic
+    result=  achiveReport.init_flow_field(model_name, layer,"初始流场信息", "online", outpath)
+    if result == "#":
+         return jsonify("")   
+    return jsonify("/xishan/xinshanFlow/"+pic)
 
 
 #precipitation balance
@@ -935,15 +832,104 @@
 #earth page data
 @app.route('/earthWaterChart', methods=['GET'])
 def earthWaterChart():
-    
+    model_name = request.args.get('model_name')
     indexId = int(request.args.get('index_id')) 
-    data =  CalHead.earthWaterChart("202001_202212",indexId)
+    data =  CalHead.earthWaterChart(model_name,indexId)
     return jsonify(data)
+
+#permeability coefficient
+@app.route('/mdLpf', methods=['GET'])
+def mdLpf():    
+    lf = base.md_lpf 
+    return jsonify(np.array(lf).tolist())
+
+#water resources
+@app.route('/water_res', methods=['GET'])
+def water_res():    
+    model_name = request.args.get('model_name')     
+    #water balance
+    path1=base.muiltyModel + model_name +"\\water_bal.txt"  
+    bal = CalHead.water_balance(model_name, path1) 
+    path2=base.muiltyModel + model_name +"\\water_res.txt" 
+    res =  CalHead.water_res(model_name,path2) 
     
+    dicts =  BigData.mergeWaterData(bal,res)
+
+    initFlowPNG = achiveReport.getWaterResFiled(model_name,0)
+    dicts["initFlowPNG"]= "/xishan/xinshanFlow/" + initFlowPNG   
+    
+    FlowPNG2 = achiveReport.getWaterResFiled(model_name,11)
+    dicts["lastFlowPNG"]= "/xishan/xinshanFlow/" + FlowPNG2 
+    dicts["initMonth"] ="2023-01"
+    dicts["lastMonth"] ="2023-12"
+    
+    #water resources
+    return dicts
+
+
+#multi-model groundwater levels
+@app.route('/water_depth', methods=['GET'])
+def water_depth():    
+    model_name = request.args.get('model_name')     
+
+    result = CalHead.water_depth(model_name)
+    #water resources
+    return result
+
+
+#small scene: single-model water level and head change
+@app.route('/xs_depth', methods=['GET'])
+def xs_depth():    
+    model_name = request.args.get('model_name')     
+
+    res = CalHead.xs_depth(model_name)
+    jsondata= CalHead.get_model_json(model_name)
+    start_time = jsondata["start_time"]
+    end_time = jsondata["end_time"] 
+    months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+    res["months"] = months
+    #water resources
+    return res
+
+#monitoring station rainfall list
+@app.route('/sensor_jyl_list', methods=['GET'])
+def sensor_jyl_list():    
+    model_name = request.args.get('model_name')     
+    data = OpenExcel.read_excel(model_name)
+    return jsonify(data)
+
+#save monitoring station rainfall
+@app.route('/sensor_jyl_save', methods=['POST'])
+def sensor_jyl_save():   
+    json = request.get_json()
+    model_name = str(json['model_name'])
+    data = json['data'] 
+    OpenExcel.write_excel(model_name,data)
+    return jsonify("淇濆瓨瀹屾瘯")
+
+#monthly report content
+@app.route('/xs_month_report', methods=['GET'])
+def xs_month_report():   
+    model_name = request.args.get('model_name') 
+    per = int(request.args.get('period'))
+    res = achiveReport.archive_report_content(model_name,per)
+    return res
+
+
+#real-time data
+@app.route('/xs_real_data', methods=['GET'])
+def xs_real_data():   
+    num = request.args.get('num')
+    start_time = request.args.get('start_time')
+    end_time = request.args.get('end_time')
+    types = request.args.get('types')
+    res = DataTask.get_data(types,num,start_time,end_time)
+    return jsonify(res)
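+# illustrative request (values are examples; the valid "types" codes live in DataTask):
+#   GET /xs_real_data?types=1&num=1001&start_time=2023-01-01&end_time=2023-01-31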
+
+
 if __name__ == '__main__':
     #app.run()    # you may specify the host IP address, port, and whether to enable debug mode
-    app.run(host="192.168.0.122", port=5000)
-
-
-
+    app.run(host="localhost", port=5000)
+    
+    
 
diff --git a/ModelPeriod.py b/ModelPeriod.py
index 7c9a407..c7c0e25 100644
--- a/ModelPeriod.py
+++ b/ModelPeriod.py
@@ -7,10 +7,6 @@
 from datetime import datetime 
 import calendar
 from dateutil import rrule
-import json
-import Base as base
-import os
-
 
 
 #get the last day of the month for the given date
@@ -26,7 +22,8 @@
     last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1)
     return last_day
     
-    
+def get_months_in_year():
+    return ["1鏈�","2鏈�","3鏈�","4鏈�","5鏈�","6鏈�","7鏈�","8鏈�","9鏈�","10鏈�","11鏈�","12鏈�",]
 
 def get_months_in_range_ym(start_time, end_time):  
 
diff --git a/OpenExcel.py b/OpenExcel.py
new file mode 100644
index 0000000..95f2d5d
--- /dev/null
+++ b/OpenExcel.py
@@ -0,0 +1,61 @@
+
+from openpyxl import load_workbook
+import os
+import shutil
+import Base as base
+
+
+def save_excel(model_name):
+   dst = base.model_dir + model_name +"\\jyl.xlsx"
+   shutil.copyfile(base.prefix +"闆ㄩ噺绔欒緭鍏ユā鏉�.xlsx",dst) 
+    
+#read the excel template data
+def read_excel(model_name):
+    paths =  base.model_dir + model_name +"\\jyl.xlsx"
+    data=[]
+    if not os.path.exists(paths):
+        return data      
+    wb = load_workbook(filename = paths)
+    ws = wb[wb.sheetnames[1]]
+   
+    for row in ws.iter_rows():
+       tmp =[]
+       for cell in row:
+          tmp.append(cell.value)
+       data.append(tmp)
+    wb.close()
+    return data
+
+
+#read the computed rainfall sheet
+def read_jyl_excel(model_name):
+    paths =  base.model_dir + model_name +"\\jyl.xlsx"
+    data=[]
+    if not os.path.exists(paths):
+        return data      
+    wb = load_workbook(filename = paths,data_only=True)
+    ws = wb[wb.sheetnames[2]]
+   
+    for row in ws.iter_rows():
+       tmp =[]
+       for cell in row:
+          tmp.append(cell.value)
+       data.append(tmp)
+    wb.close()
+    return data
+
+
+def write_excel(model_name,data):
+    paths =  base.model_dir + model_name +"\\jyl.xlsx"
+    if not os.path.exists(paths):
+        save_excel(model_name)  
+    wb = load_workbook(filename = paths)
+    ws = wb[wb.sheetnames[1]]
+    for i in range(len(data)):
+        for j in range(len(data[i])):
+            ws.cell(row=i+1, column=j+1).value = data[i][j]
+    wb.save(paths)
+    wb.close()
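+#minimal usage sketch, assuming the model folder exists; round-trips sheet 2:
+#   write_excel("mymodel", read_excel("mymodel"))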
+
+    
+    
diff --git "a/Predict - \345\211\257\346\234\254.py" "b/Predict - \345\211\257\346\234\254.py"
deleted file mode 100644
index bbbfccc..0000000
--- "a/Predict - \345\211\257\346\234\254.py"
+++ /dev/null
@@ -1,592 +0,0 @@
-
-# import the Flask class
-from flask import Flask
-from flask import jsonify
-from flask import request
-from flask_cors import CORS
-import sys
-import numpy as np
-import pandas as pd
-import flopy
-import flopy.utils.binaryfile as bf
-import csv
-import time
-from openpyxl import load_workbook
-import os
-import shutil
-import json
-import Base as base
-import CalHead
-import ModelPeriod
-
-
-# strt = ml.bas6.strt
-# # strs = ml.bas6.strt.__getitem__(1)
-# # print(strs.get_value())
-
-# mdBase = flopy.modflow.ModflowBas(ml,strt=1.0,ibound=ml.bas6.ibound)
-# mdBase.write_file(check=False)
-
-
-base_init_year=["2020","2021","2022"]
-river_start_index = 454
-river_end_index =562
-
-#number of prediction periods
-predict_per = 12
-
-#precipitation
-# def predict_water_chart(base_year,start_time ,end_time):
-#      model_ws = base.baseModel
-#      baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
-#                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)   
-#      index = 0
-#      if base_year in base_init_year:
-#          index = base_init_year.index(str(base_year))
-         
-#      y_data=[]
-#      x_data=[]
-#      satrt_index = index*12
-#      end_index = satrt_index+12
-#      for per in range(satrt_index,end_index):
-#          item = baseMdoel.rch.rech.__getitem__(kper=per)
-#          value = item.get_value()
-#          value_float = np.array(value)
-#          avg = value_float.mean()
-#          y_data.append(float (avg))
-         
-#      start_month = str(base_year) +"-01"
-#      end_month = str(base_year) +"-12"
-#      x_data= ModelPeriod.get_months_in_range_ym(start_month,end_month)
-#      result = {"y_data": y_data, "x_data": x_data}
-#      return result
-
-base_water = base.prefix + 'base_water.ini'
-def predict_water_chart(base_year,start_time ,end_time):
-    
-    
-     water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8')
-     print(water_array)
-     y_data=[]
-     x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
-     water= water_array[0]
-     for e in water:
-         y_data.append(e)
-    
-     result = {"y_data": y_data, "x_data": x_data}
-     return result
-    
-#river line chart
-# def predict_river_chart(base_year,start_time ,end_time):
-#      model_ws = base.baseModel
-#      baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
-#                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)    
-#      index = 0
-#      if base_year in base_init_year:
-#          index = base_init_year.index(str(base_year))
-         
-#      y_data=[]
-#      x_data=[]
-#      satrt_index = index*12
-#      end_index = satrt_index+12 
-#      for per in range(satrt_index,end_index):
-#         wel = baseMdoel.wel.stress_period_data.__getitem__(kper=per)
-#         arr=[]
-#         for i in range(river_start_index, river_end_index):
-#             Q = wel[i][3]
-#             arr.append(float(Q))
-#         avg = np.array(arr).mean()
-#         y_data.append(float(avg))    
-#      start_month = str(base_year) +"-01"
-#      end_month = str(base_year) +"-12"
-#      x_data= ModelPeriod.get_months_in_range_ym(start_month,end_month)
-#      result = {"y_data": y_data, "x_data": x_data}
-#      return result
-
-base_river = base.prefix + 'base_river.ini'
-def predict_river_chart(base_year,start_time ,end_time):
-     
-    
-     river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8')
-     print(river_array)
-     y_data=[]
-     x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
-     for e in river_array:
-         y_data.append(e)
-         
-     result = {"y_data": y_data, "x_data": x_data}
-     return result
-
-
-def run_model_predict(model_name):
-       
-    predictiondata=""   
-    prediction_path = base.model_dir + model_name +"\\prediction.json"
-    if os.path.exists(prediction_path):
-        with open(prediction_path,encoding='utf-8') as f:
-             predictiondata = json.load(f)
-    
-
-    if predictiondata:
-  
-            per =  ModelPeriod.get_months_in_range_count(
-                predictiondata["start_time"], predictiondata["end_time"])
-                    
-            # updateDisFile(model_name,per)
-            
-            # updateBase6File(model_name,predictiondata)
- 
-            #updateRchFile(model_name,predictiondata)
-            
-            updateRiverFile(model_name,predictiondata)
-            
-            #updateMineFile(model_name,predictiondata)
-    else:
-        print("prediction.json 棰勬祴鍦烘櫙鏂囦欢涓虹┖锛屾棤闇�鏇存敼鐩稿簲鏂囦欢")
-    
-    
-    # model_ws = base.model_dir + model_name
-
-    # ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
-    #                                 exe_name="mf2005", verbose=True,  version="mf2005", check=False)   
-    # ml.run_model(report = True)
-    return jsonify("杩愯鎴愬姛锛�")
-
-
-
-
-#update the pumping (mine) data, either 1. by district or 2. for the whole area
-def updateMineFile(model_name,predictiondata):
-    
-    start_time =predictiondata["start_time"] 
-    end_time = predictiondata["end_time"]   
-    base_year = predictiondata["mine"]["base_year"]  
-    
-    base_start= str(base_year) + "-" +  str(start_time.split("-")[1])
-    base_end= str(base_year) + "-" +  str(end_time.split("-")[1])
-    
-    start_index = (int)(base.times_month_per_dict[base_start])
-    end_index = (int)(base.times_month_per_dict[base_end])
-    
-    pers= end_index-start_index + 1  
-    
-    area= predictiondata["mine"]["area"]       
-    flag = check_mine_param(predictiondata) 
-    
-    if flag == 'true':
-        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)     
-        update_model_ws = base.model_dir + model_name
-        updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-        district_dict = get_distric_dict()
-  
-        area_dict = get_area_dict(area)
-                
-        lrcq = {}
-        for per in range(pers):
-            wel = [] 
-            wel = baseMdoel.wel.stress_period_data.__getitem__(kper = (per + start_index ))
-            array2d = []      
-            count = 1      
-            for Layer, Row, Column, Q in wel:
-                array = []
-                # if within the river data range
-                if count > river_end_index :  
-                    
-                     r = (float) (get_row_column_ratio(Row, Column, district_dict, area_dict))
-                    
-                     array = [Layer, Row, Column, Q * r]
-                     
-                else:
-                     array = [Layer, Row, Column, Q]
-                              
-                array2d.append(array)  
-                count +=1
-            
-            lrcq[per] = array2d 
-            
-        flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
-        updateMdoel.write_input()          
-    else:
-           print("Well--Mine鏂囦欢鏃犻渶淇敼锛�")    
- 
-    
-#build the name -> ratio mapping from area
-def get_area_dict(area):
-    result ={}
-    
-    for i in range(len(area)):
-        name = area[i]["name"]
-        rt = area[i]["ratio"]
-        result[name]= rt
-    return result
-
-
-#build the district (row,column) -> name mapping
-def get_distric_dict():
-    data =  base.district   
-    result = {}
-    for row ,column ,id ,name in data:
-        key = str(row)+","+str(column)
-        result[key]= name 
-    return result
-    
-    
-#get the ratio for a given row and column
-def get_row_column_ratio(row, column ,district_dict, area_dict ):
-     key = str(row) +"," + str(column)
-     if area_dict.__contains__("鍏ㄩ儴鍖哄煙"):
-         return area_dict["鍏ㄩ儴鍖哄煙"]
-     
-     if district_dict.__contains__(key):
-          name = district_dict[key]
-          ratio = area_dict[name]
-          return float(ratio)
-        
-     return float(1.0)
-    
- 
-    
-def check_mine_param(predictiondata):
-    
-     mine = predictiondata["mine"]
-     if not mine:
-         print("鎶芥按浜� 棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     base_year = predictiondata["mine"]["base_year"]
-     if not base_year :
-         print(" Mine : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     area= predictiondata["mine"]["area"]
-     if not area  :
-         print(" Mine : area棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     return "true"
-
-
-#update the river multiplier
-# def updateRiverFile(model_name,predictiondata):
-  
-#     start_time =predictiondata["start_time"] 
-#     end_time = predictiondata["end_time"] 
-#     base_year = predictiondata["river"]["base_year"]  
-    
-#     ratio= float(predictiondata["river"]["ratio"])
-    
-#     base_start= str(base_year) + "-" +  str(start_time.split("-")[1])
-#     base_end= str(base_year) + "-" +  str(end_time.split("-")[1])
-    
-#     start_index = (int)(base.times_month_per_dict[base_start])
-#     end_index = (int)(base.times_month_per_dict[base_end])
-    
-#     pers= end_index-start_index + 1
-    
-
-#     flag = check_river_param(predictiondata)
-    
-#     if flag == "true":
-        
-#         baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel,
-#                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-#         update_model_ws = base.model_dir + model_name
-#         updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
-#                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-          
-#         lrcq = {}
-#         for per in range(pers):
-#             wel = [] 
-
-#             wel = baseMdoel.wel.stress_period_data.__getitem__(kper = (per + start_index ))
-#             array2d = []
-            
-#             count = 1
-            
-#             for Layer, Row, Column, Q in wel:
-#                 array = []
-#                 # if within the river data range
-#                 if count > river_start_index and count <= river_end_index:             
-#                      array = [Layer, Row, Column, Q * ratio]
-#                 else:
-#                      array = [Layer, Row, Column, Q]
-                              
-#                 array2d.append(array)  
-#                 count +=1
-            
-#             lrcq[per] = array2d 
-            
-#         flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
-#         updateMdoel.write_input()          
-                               
-#     else:     
-#         print("Well--River鏂囦欢鏃犻渶淇敼锛�")
-
-
-def updateRiverFile(model_name,predictiondata):
-  
-    start_time =predictiondata["start_time"] 
-    end_time = predictiondata["end_time"] 
-   
-    
-    river_ratio= float(predictiondata["river"]["ratio"])
-  
-    rain_ratio = float(predictiondata["rain"]["ratio"])
-    rain_base_year = predictiondata["rain"]["base_year"]
-    
-    area= predictiondata["mine"]["area"] 
-
-    flag = check_river_param(predictiondata)
-    
-    if flag == "true":
-        
-        ws = base.predictParamModel + rain_base_year
-        
-        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-        update_model_ws = base.model_dir + model_name
-        updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-        district_dict = get_distric_dict()
-  
-        area_dict = get_area_dict(area)
-        
-        lrcq = {}
-        
-        for per in range(predict_per):
-            wel = [] 
-
-            wel = baseMdoel.wel.stress_period_data.__getitem__(kper = per)
-            wel_len = len(wel)
-            
-            #lateral boundary
-            for i in range (0,453):
-                wel[i][3] = wel[i][3] * rain_ratio
-                     
-            #river
-            for i in range(453, 562):
-                 wel[i][3] = wel[i][3] * river_ratio
-                     
-            #pumping wells
-            for i in range(562,wel_len):
-                
-                r = (float) (get_row_column_ratio(wel[i][1], wel[i][2], district_dict, area_dict))
-                wel[i][3] = wel[i][3]  * r    
-               
-            lrcq[per] = wel 
-            
-        flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
-        updateMdoel.write_input()          
-                               
-    else:     
-        print("Well--River鏂囦欢鏃犻渶淇敼锛�")
-
-def check_river_param(predictiondata):
-    
-     river = predictiondata["river"]
-     if not river:
-         print("River棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     base_year = predictiondata["river"]["base_year"]
-     if not base_year :
-         print(" River : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     ratio= predictiondata["river"]["ratio"]
-     if not ratio or ratio == "1" :
-         print(" River : ratio棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     return "true"
-
-
-# def  updateRchFile(model_name,predictiondata):
-    
-#     start_time =predictiondata["start_time"] 
-#     end_time = predictiondata["end_time"] 
-    
-#     base_year = predictiondata["rain"]["base_year"]  
-#     ratio= float(predictiondata["rain"]["ratio"])
-    
-#     base_start= str(base_year) + "-" +  str(start_time.split("-")[1])
-#     base_end= str(base_year) + "-" +  str(end_time.split("-")[1])
-    
-#     start_index = (int)(base.times_month_per_dict[base_start])
-#     end_index = (int)(base.times_month_per_dict[base_end])
-#     pers= end_index-start_index + 1
-    
-    
-#     flag = check_rain_param(predictiondata)
-  
-#     if flag == "true":
-        
-#         baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel,
-#                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-#         update_model_ws = base.model_dir + model_name
-#         updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
-#                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-#         for per in range(pers):
-            
-#             item = baseMdoel.rch.rech.__getitem__(kper = (per + start_index))
-#             array2d = item.get_value()
-#             array2d_len = len(array2d)
-            
-#             for i in range(array2d_len):
-      
-#                   array_len = len(array2d[i])
-#                   for j in range(array_len):
-                  
-#                       if str(base.area_array[i][j]) != '-9999':
-                    
-#                           array2d[i][j] =  array2d[i][j] * ratio
-                           
-#             updateMdoel.rch.rech.__setitem__(key = per, value=array2d) 
-          
-#         rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
-#         rch.write_file(check=False)
-        
-#     else:
-        
-#         print("Rch鏂囦欢鏃犻渶淇敼锛�")
-    
-
-def  updateRchFile(model_name,predictiondata):
-    
-    start_time =predictiondata["start_time"] 
-    end_time = predictiondata["end_time"] 
-    
-    #wet year / dry year
-    base_year = predictiondata["rain"]["base_year"]  
-    ratio= float(predictiondata["rain"]["ratio"])
-    
-    
-    flag = check_rain_param(predictiondata)
-    
-    #model folder of the data source
-    base_ws=  base.predictParamModel + base_year
-  
-    if flag == "true":
-        
-        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-        update_model_ws = base.model_dir + model_name
-        updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-        
-        for per in range(predict_per):
-            
-            item = baseMdoel.rch.rech.__getitem__(kper = per)
-            array2d = item.get_value()
-            array2d_len = len(array2d)
-            
-            for i in range(array2d_len):
-      
-                  array_len = len(array2d[i])
-                  for j in range(array_len):
-                  
-                      if str(base.area_array[i][j]) != '-9999':
-                    
-                          array2d[i][j] =  array2d[i][j] * ratio
-                           
-            updateMdoel.rch.rech.__setitem__(key = per, value=array2d) 
-          
-        rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
-        rch.write_file(check=False)
-        
-    else:
-        
-        print("Rch鏂囦欢鏃犻渶淇敼锛�")      
-        
-def check_rain_param(predictiondata):
-    
-     rain = predictiondata["rain"]
-     if not rain:
-         print("Rch棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     base_year = predictiondata["rain"]["base_year"]
-     if not base_year :
-         print(" Rch : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     ratio= predictiondata["rain"]["ratio"]
-     if not ratio or ratio == "1" :
-         print(" Rch : ratio棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
-         return "false"
-     
-     return "true"
-     
-        
-     #update the bas6 file's initial head info
-def updateBase6File(model_name,predictdata):
-     model_ws = base.model_dir + model_name
-     ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-     
-     
-     #initial head
-     init_header = predictdata["initHeader"]
-     
-     dir = base.model_dir + init_header + "\\modflow.head"
-     head = bf.HeadFile(dir)
-     alldata = head.get_alldata()
-     
-     lens = len(alldata)
-     last_index = lens-3
-     
-     last_array3= alldata[last_index]
-
-     strt = ml.bas6.strt
-     # strs = ml.bas6.strt.__getitem__(2)
-     # print(strs.get_value())
-     strt.__setitem__(0,last_array3[0])
-     strt.__setitem__(1,last_array3[1])
-     strt.__setitem__(2,last_array3[2])
-     
-    
-     mfBase6 = flopy.modflow.ModflowBas(
-          ml,
-          strt= strt,
-          ibound=ml.bas6.ibound,
-          hnoflo=ml.bas6.hnoflo,
-          extension="bas6",)
-     
-     mfBase6.write_file(check=False)
-
-
-#modify the dis file
-def updateDisFile(model_name, per):
-
-    model_ws = base.model_dir + model_name
-    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
-                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-
-    mfDis = flopy.modflow.ModflowDis(
-        ml,
-        nlay=ml.dis.nlay,
-        nrow=ml.dis.nrow,
-        ncol=ml.dis.ncol,
-        nper=per,
-        delr=ml.dis.delr,
-        delc=ml.dis.delc,
-        top=ml.dis.top,
-        botm=ml.dis.botm,
-        perlen=ml.dis.perlen,
-        nstp=ml.dis.nstp,
-        tsmult=ml.dis.tsmult,
-        steady=ml.dis.steady,
-        itmuni=ml.dis.itmuni,
-        lenuni=ml.dis.lenuni,
-        extension="dis")
-
-    mfDis.write_file(check=False)
diff --git a/Predict.py b/Predict.py
index 36ea1ce..8a30b46 100644
--- a/Predict.py
+++ b/Predict.py
@@ -1,23 +1,14 @@
 
 # import the Flask class
-from flask import Flask
-from flask import jsonify
-from flask import request
-from flask_cors import CORS
-import sys
 import numpy as np
-import pandas as pd
 import flopy
 import flopy.utils.binaryfile as bf
-import csv
-import time
-from openpyxl import load_workbook
 import os
-import shutil
 import json
 import Base as base
 import CalHead
 import ModelPeriod
+import OpenExcel
 
 
 base_init_year=["2020","2021","2022"]
@@ -30,16 +21,15 @@
 #precipitation
 
 base_water = base.prefix + 'base_water.ini'
-def predict_water_chart(base_year,start_time ,end_time):
-    
-    
+def predict_water_chart(base_year,start_time ,end_time,value):   
      water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8')
-
      y_data=[]
-     x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
-     water= water_array[0]
-     for e in water:
-         y_data.append(e)
+     x_data= ModelPeriod.get_months_in_year()
+     water= water_array[int(base_year)]
+     for data in water:
+         float_data = float(data)/9
+         float_data= round(float_data*value,2)
+         y_data.append(float_data)
     
      result = {"y_data": y_data, "x_data": x_data}
      return result
@@ -47,43 +37,104 @@
 #river line chart
 
 base_river = base.prefix + 'base_river.ini'
-def predict_river_chart(base_year,start_time ,end_time):
-     
-    
+def predict_river_chart(base_year,start_time ,end_time,value):
      river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8')
-     
      y_data=[]
-     x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
-     for e in river_array:
-         y_data.append(e)
+     x_data= ModelPeriod.get_months_in_year()
+     for data in river_array:
+         #units: 10^4 m^3
+         float_data = float(data)/4/10000
+         float_data= round(float_data*value,2)
+         y_data.append(float_data)
          
      result = {"y_data": y_data, "x_data": x_data}
      return result
 
 
+#extraction line chart
+base_mining = base.prefix + 'base_mining.ini'
+def predict_well_chart(base_year,start_time ,end_time,data):
+     #   2022 / Haidian / Changping / Mentougou / Shijingshan
+     area_names=["全部区域","海淀区","昌平区","门头沟区","石景山区"]
+     mining_array = np.loadtxt(base_mining, dtype=str,encoding='utf-8')
+     x_data= ModelPeriod.get_months_in_year()
+     result = {"x_data": x_data}   
+     y_data=[]
+     for item in data:
+         name = item["area_name"]
+         value = float(item["value"]) 
+         if name =='朝阳区':  #Chaoyang District is not in area_names; skip it
+             continue
+         index = area_names.index(name)   
+         row_data = mining_array[index]
+         float_data=[]
+         for item in row_data:
+             x = round(float(item)/10000*value,2)
+             float_data.append(x)
+         dicts={"name":name,"data":float_data}
+         y_data.append(dicts)
+     result["y_data"] = y_data
+     return result
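+# illustrative input for predict_well_chart (shape inferred from the loop above):
+#   data = [{"area_name": "海淀区", "value": 1.0}]
+# each entry yields {"name": <area>, "data": [12 monthly values, 10^4 m^3]}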
+ 
+    
+def run_model(model_name):
+    if model_name == base.not_allowed_model or model_name in base.archive_models:
+        return "鏈ā鍨嬩负楠岃瘉妯″瀷,涓嶅厑璁镐慨鏀癸紒"
+    
+    dicts= run_model_predict(model_name)
+    if dicts["code"] == 400:
+        return dicts
+    #export the csv file
+    CalHead.exportCsV(model_name)
+    
+    #update the model's 3-D grid configuration
+    base.updateModelConfig(model_name)
+    
+    #create the model's 3-D grid
+    filedir = base.model3d_path + model_name
+    
+    if not os.path.exists(filedir):
+        os.makedirs(filedir, exist_ok=True)
+        
+    base.callModelexe() 
+    #compute water resources and water balance
+    CalHead.run_zonebudget_bal(model_name)
+    CalHead.run_zonebudget_res(model_name)
+    dicts= {"code":200,"msg":"棰勬祴妯″瀷杩愯鎴愬姛锛�" } 
+    return dicts
+
+
 def run_model_predict(model_name):
-       
+     
     predictiondata=""   
     prediction_path = base.model_dir + model_name +"\\prediction.json"
     if os.path.exists(prediction_path):
         with open(prediction_path,encoding='utf-8') as f:
-             predictiondata = json.load(f)
+              predictiondata = json.load(f)
     
-    if predictiondata:
-                    
+    periods =CalHead.get_model_period(model_name)
+    periods_len= len(periods)
+    
+    if predictiondata:      
+        updateDisFile(model_name,periods_len)         
         try:
-            updateDisFile(model_name,predict_per)
-            
-            updateBase6File(model_name,predictiondata)
+           flag =  updateBase6File(model_name,predictiondata)
+           if not flag:
+                dicts= {"code":400,"msg":"璇锋鏌ユā鍨嬬殑鍒濆姘存槸鍚﹁缃紒" } 
+                return  dicts             
+        except:      
+             dicts= {"code":400,"msg":"璇锋鏌ユā鍨嬬殑鍒濆姘存槸鍚﹁缃紒" } 
+             return  dicts       
  
-            updateRchFile(model_name,predictiondata)
+        try:
+            updateRchFile(model_name,predictiondata)   
+        except:      
+            print("RchFile鏃犻娴嬪弬鏁帮紝鏃犻渶淇敼锛�")
             
+        try:
             updateRiverFile(model_name,predictiondata)
-        except:
-            
-            return "璇锋鏌ュ垵濮嬫按澶淬�侀檷姘撮噺銆佹案瀹氭渤鍏ユ笚閲忋�佸紑閲囬噺绛夊弬鏁版槸鍚﹀~鍐欏畬鏁达紒"
-            
-            
+        except:      
+            print("RiverFile鏃犻娴嬪弬鏁帮紝鏃犻渶淇敼锛�")    
     else:
         print("prediction.json 棰勬祴鍦烘櫙鏂囦欢涓虹┖锛屾棤闇�鏇存敼鐩稿簲鏂囦欢")
     
@@ -93,7 +144,8 @@
     ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
                                      exe_name="mf2005", verbose=True,  version="mf2005", check=False)   
     ml.run_model(report = True)
-    return "棰勬祴妯″瀷杩愯鎴愬姛锛�"
+    dicts= {"code":200,"msg":"棰勬祴妯″瀷杩愯鎴愬姛锛�" } 
+    return dicts
 
 
     
@@ -142,11 +194,21 @@
         
         rain_ratio = float(predictiondata["rain"]["ratio"])
         rain_base_year = predictiondata["rain"]["base_year"]
+        if rain_base_year=='4':
+            rain_base_year="1"
         
         river_ratio= float(predictiondata["river"]["ratio"])
         area= predictiondata["mine"]["area"] 
         
-        ws = base.predictParamModel + rain_base_year
+        # ws = base.predictParamModel + rain_base_year
+        ws=""
+        start_time = predictiondata["start_time"]
+        end_time =  predictiondata["end_time"]
+        count = ModelPeriod.get_months_in_range_count(start_time, end_time)
+        if count==12:
+             ws=  base.predictParamModel + rain_base_year     
+        else:
+             ws=  base.predictModel60  
         
         baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws,
                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
@@ -161,7 +223,10 @@
         
         lrcq = {}
         
-        for per in range(predict_per):
+        periods =CalHead.get_model_period(model_name)
+        periods_len= len(periods)
+        
+        for per in range(periods_len):
             wel = [] 
             array2d = [] 
             
@@ -188,14 +253,21 @@
                 array = [Layer, Row, Column, Q]
                 array2d.append(array)
                 
+            #append the extra wells
             flex_data= getFlexdata(model_name)
+            print("==============")
+            print(flex_data)
     
             for i in range(len(flex_data)):
                 array2d.append(flex_data[i])
           
             lrcq[per] = array2d 
             
-        flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
+        flopy.modflow.ModflowWel(updateMdoel,
+                                 ipakcb= baseMdoel.wel.ipakcb,
+                                 dtype=baseMdoel.wel.dtype,
+                                 options=baseMdoel.wel.options,
+                                 stress_period_data=lrcq)
         updateMdoel.write_input()          
                                
     else:     
@@ -227,11 +299,30 @@
     if flag == "true":
          #wet year / dry year
         base_year = predictiondata["rain"]["base_year"]  
-        ratio= float(predictiondata["rain"]["ratio"])
+        if base_year =="1" or base_year =="2" or base_year =="3":
+            updateRchBaseYear(model_name,predictiondata)
+        elif  base_year =="4":
+            #ingest sensor data
+            updateRchRealData(model_name,predictiondata)
         
+    else:   
+        print("Rch鏂囦欢鏃犻渶淇敼锛�")
+
+#update normal-year / wet-year / dry-year data
+def updateRchBaseYear(model_name,predictiondata):
+      #wet year / dry year
+        base_year = predictiondata["rain"]["base_year"]  
+        ratio= float(predictiondata["rain"]["ratio"])   
         #model folder of the data source
-        base_ws=  base.predictParamModel + base_year
-        
+        base_ws=""
+        start_time = predictiondata["start_time"]
+        end_time =  predictiondata["end_time"]
+        count = ModelPeriod.get_months_in_range_count(start_time, end_time)
+        if count==12:
+             base_ws=  base.predictParamModel + base_year     
+        else:
+             base_ws=  base.predictModel60    
+            
         baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
         
@@ -239,28 +330,107 @@
         updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
         
-        for per in range(predict_per):
-            
+           
+        periods =CalHead.get_model_period(model_name)
+        periods_len= len(periods)
+        for per in range(periods_len):       
             item = baseMdoel.rch.rech.__getitem__(kper = per)
             array2d = item.get_value()
             array2d_len = len(array2d)
             
             for i in range(array2d_len):
-      
                   array_len = len(array2d[i])
-                  for j in range(array_len):
-                  
-                      if str(base.area_array[i][j]) != '-9999':
-                    
+                  for j in range(array_len):          
+                      if str(base.area_array[i][j]) != '-9999':           
                           array2d[i][j] =  array2d[i][j] * ratio
                            
-            updateMdoel.rch.rech.__setitem__(key = per, value=array2d) 
-          
-        rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
-        rch.write_file(check=False)
+            updateMdoel.rch.rech.__setitem__(key = per, value=array2d)         
+        rch = flopy.modflow.ModflowRch(updateMdoel,
+                                       nrchop=baseMdoel.rch.nrchop,
+                                       ipakcb=baseMdoel.rch.ipakcb,
+                                       rech=updateMdoel.rch.rech,
+                                       irch =baseMdoel.rch.irch)
+         
+        rch.write_file(check=False)    
+
+    
+#ingest sensor data
+def updateRchRealData(model_name,predictiondata):
+    
+       # use the normal-year rch file
+        base_year = "1"  
+        ratio= float(predictiondata["rain"]["ratio"])   
+        if not ratio:
+            ratio = 1
+        excel_data = OpenExcel.read_jyl_excel(model_name) 
+        array_data =[]
         
-    else:   
-        print("Rch鏂囦欢鏃犻渶淇敼锛�")      
+        if not excel_data:
+            print("闄嶆按妯℃澘鏁版嵁鏈幏鍙栧埌锛�")
+            return "闄嶆按妯℃澘鏁版嵁鏈幏鍙栧埌锛�" 
+        for i in range(1,len(excel_data)):
+            temp =[]
+            for j in range(1,len(excel_data[i])):
+                data = round(float(excel_data[i][j]),8) 
+                temp.append(data)
+            array_data.append(temp)
+        
+        base_ws=""
+        start_time = predictiondata["start_time"]
+        end_time =  predictiondata["end_time"]
+        count = ModelPeriod.get_months_in_range_count(start_time, end_time)
+        if count==12:
+             base_ws=  base.predictParamModel + base_year     
+        else:
+             base_ws=  base.predictModel60      
+   
+        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
+                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+          
+        update_model_ws = base.model_dir + model_name
+    
+        updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
+        
+        #rainfall zones
+        area_dicts = base.getAreas()
+        
+        
+        periods =CalHead.get_model_period(model_name)
+        periods_len= len(periods)
+        
+        for per in range(periods_len):   
+            #one period's values for the 16 zones; indices 0-15 match the keys of the area_dicts rainfall zones
+            water_per_data = get_Array2_column(array_data,per)   
+
+            item = baseMdoel.rch.rech.__getitem__(kper = per)
+            array2d = item.get_value()
+            for key in area_dicts:  
+                tuples= area_dicts[key]
+                values = water_per_data[int(key)] 
+                for i in range(len(tuples)):
+                    x = tuples[i][0]
+                    y = tuples[i][1]
+                    array2d[x][y]= values*ratio
+            
+            updateMdoel.rch.rech.__setitem__(key = per, value=array2d)   
+            
+        rch = flopy.modflow.ModflowRch(updateMdoel,
+                                       nrchop=baseMdoel.rch.nrchop,
+                                       ipakcb=baseMdoel.rch.ipakcb,
+                                       rech=updateMdoel.rch.rech,
+                                       irch =baseMdoel.rch.irch)
+         
+        rch.write_file(check=False) 
+        print("闄嶆按鍒嗗尯鏁版嵁鏇存柊瀹屾瘯!")
+        return "闄嶆按鍒嗗尯鏁版嵁鏇存柊瀹屾瘯锛�" 
+
+#get one column from a 2-D array
+def get_Array2_column(array_data,column):
+    arr = np.array(array_data)
+    column_data = arr[:, column]
+    return column_data
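+# e.g. get_Array2_column([[1, 2], [3, 4]], 1) returns array([2, 4])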
+    
         
 def check_rain_param(predictiondata):
     
@@ -287,18 +457,18 @@
      model_ws = base.model_dir + model_name
      ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)
-     
-     
      #initial head
-     init_header = predictdata["initHeader"]
      
-     dir = base.model_dir + init_header + "\\modflow.head"
+     if "initHeader" not in predictdata:
+         print("=============has no  initHeader=============")
+         return False
+     init_header = predictdata["initHeader"]  
+     
+     dir = base.model_dir + init_header + "\\modflow.head"   
      head = bf.HeadFile(dir)
      alldata = head.get_alldata()
      
-     lens = len(alldata)
-     last_index = lens-3
-     
+     last_index = len(alldata)-1    
      last_array3= alldata[last_index]
 
      strt = ml.bas6.strt
@@ -317,6 +487,7 @@
           extension="bas6",)
      
      mfBase6.write_file(check=False)
+     return True
 
 
 #modify the dis file
@@ -345,3 +516,5 @@
         extension="dis")
 
     mfDis.write_file(check=False)
+    
+
diff --git a/WaterXBL.py b/WaterXBL.py
new file mode 100644
index 0000000..5e9a980
--- /dev/null
+++ b/WaterXBL.py
@@ -0,0 +1,77 @@
+
+import flopy.utils.binaryfile as bf
+import Base as base
+import numpy as np
+
+#specific yield
+water_u = 0.2
+#study area in square meters
+water_F= float(5652 * 500 *500)
+#number of active computation cells
+water_invalid_cell =5652
+#study area in square kilometers (= 2721 cells x 0.25 km^2)
+water_F_KM= 680.250
+
+#cell count of the masked (value == 1) zone scanned below
+pyq_cells_total =2721           
+
+
+#groundwater storage change
+def get_grd_storage(model_name,per1,per2):       
+    arr = np.loadtxt(base.xs_mp_path, dtype=int)
+    pyq_cells=[]
+    k=0
+    for i in range(len(arr)):
+           for j in range(len(arr[i])):  
+               if arr[i][j] == 1:  
+                   k+=1
+                   pyq_cells.append((i,j))
+
+    dir = base.model_dir + model_name  + "\\modflow.head" 
+
+    if model_name=="202001_202212":
+         dir = base.baseModel2  + "\\modflow.head" 
+  
+    head = bf.HeadFile(dir)
+    alldata = head.get_alldata() 
+    
+    #initial water level data
+    z_start = alldata[int(per1+1)*3-3,0,:,:] 
+    #ending water level data
+    z_end = alldata[int(per2+1)*3-1,0,:,:] 
+    
+    z_start[(z_start<=0)] = 0 
+    z_end[(z_end<=0)] = 0 
+    
+    # z_start_avg = float(np.sum(z_start)/5652)
+    # z_end_avg = float(np.sum(z_end)/5652)
+    
+    z_start_total = 0
+    z_end_total = 0
+    for item in pyq_cells:
+        i = item[0]
+        j = item[1]
+        z_start_total  += z_start[i,j]
+        z_end_total += z_end[i,j]
+    
+    z_start_avg = z_start_total/2721
+    z_end_avg = z_end_total/2721
+    
+    #ΔW = 100·(h1-h2)·μ·F/t
+    year = (per2+1-per1)/12
+    # print(year)
+    storage = 100 * (z_start_avg-z_end_avg) * water_u * water_F_KM /year
+    return storage
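+# worked example (assumed numbers): a 1 m average head drop over per1=0..per2=11
+# gives year=1 and storage = 100*1*0.2*680.25 = 13605, i.e. 1.3605e8 m^3/a
+# expressed in units of 10^4 m^3/a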
+
+
+
+#relative balance error δ
+#Q_total_recharge - Q_total_discharge ± ΔW = δ
+# δ / Q_total_recharge * 100%
+def get_grd_relative_equ(Q1, Q2,w):
+   x=  ( Q1-Q2) + w 
+   y =  x/Q1
+   return y 
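+# e.g. Q1=100, Q2=95, w=-2 gives x = 5-2 = 3 and y = 0.03, i.e. a 3% relative error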
+
+
+
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..432b616
--- /dev/null
+++ b/main.py
@@ -0,0 +1,60 @@
+
+# import the Flask class
+from flask import Flask
+from flask import jsonify
+from flask import request
+from flask_cors import CORS
+import pymssql
+
+
+# Flask takes a single argument, __name__, which points to the package the app lives in
+app = Flask(__name__)
+CORS(app, supports_credentials=True, resources=r'/*')
+
+server = '192.168.0.123:1433'
+user='sa'
+password='admin123X'
+database ='microseism3'
+
+#fetch data for a given month
+def get_event_location_data(month):
+     
+    conn = pymssql.connect(server=server, user=user, password=password, database=database,as_dict=True)
+    cursor = conn.cursor()
+    res =[] 
+    try:
+        # cast month to int so the table-name suffix cannot be abused for SQL injection
+        sqlStr = 'SELECT * FROM dbo.event_location_'+ str(int(month))
+        # execute the query
+        cursor.execute(sqlStr)
+    
+        # fetch the result set
+        result = cursor.fetchall()
+    
+        for row in result:
+           dic={"x":row["Event_X"],"y":row["Event_Y"],"z":row["Event_Z"],"v":row["Event_Energy"]}
+           res.append(dic)
+        
+    except Exception as e:
+        print("Error occurred:", str(e))
+        return []
+      
+    finally:
+        # close the connection
+        cursor.close()
+        conn.close()
+    return res   
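+# illustrative response shape (keys from the row mapping above):
+#   [{"x": 1.0, "y": 2.0, "z": 3.0, "v": 0.5}, ...]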
+
+#query data for the month passed in
+@app.route('/get_event_location_data', methods=['GET'])
+def event_location_data():   
+    month = request.args.get('month') 
+    res =  get_event_location_data(month)
+    return jsonify(res)
+
+
+if __name__ == '__main__':
+    #app.run()    # you may specify the host IP address, port, and whether to enable debug mode
+    app.run(host="192.168.0.107", port=8080)
+    
+    
+
diff --git a/test.py b/test.py
deleted file mode 100644
index 192d800..0000000
--- a/test.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Dec 21 12:02:24 2023
-
-@author: ZMK
-"""
-
-import Base 
-
-import numpy as np
-
-# model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini'
-
-# conf = np.loadtxt(model_config, dtype=str,encoding='utf-8')
-
-
-# conf[1]='1'
-
-# np.savetxt(model_config,conf,fmt='%100s',encoding='utf-8')
-
-# print(conf)
-Base.updateModelConfig("202301_202312")

--
Gitblit v1.9.1