zmk
2024-08-28 1ae38ab34bcbdff622c7623119ee54bad419f4ed
提交代码
2个文件已删除
5个文件已修改
11个文件已添加
4921 ■■■■ 已修改文件
AchiveReport.py 494 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
Base-dev.py 173 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
Base.py 43 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
BigData.py 39 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
CalHead.py 872 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
DataTask.py 325 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
DataTransf.py 526 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
GRU_zmk.py 275 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
GRU_zmk_pre - 副本.py 215 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
GRU_zmk_pre.py 267 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
MainAPI.py 604 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
ModelPeriod.py 7 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
OpenExcel.py 61 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
Predict - 副本.py 592 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
Predict.py 269 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
WaterXBL.py 77 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
main.py 60 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
test.py 22 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
AchiveReport.py
New file
@@ -0,0 +1,494 @@
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import math
from docxtpl import DocxTemplate
import flopy
import flopy.utils.binaryfile as bf
from matplotlib import cm, colors
import os
import time
import Base as base
import CalHead
import WaterXBL
#获取Z值
def get_flow_Z(model_name, per, layer):
    """Read the simulated head matrix for one stress period and layer.

    Resolves the model's head-file location (the validation model and the
    multi-scenario models live in special directories), slices the last
    time step of the requested period, and zeroes non-positive heads.
    """
    head_path = base.model_dir + model_name + "\\modflow.head"
    multi_models = CalHead.muiltyModelList()
    if model_name == base.not_allowed_model:
        head_path = base.baseModel2 + "\\modflow.head"
    if model_name in multi_models:
        head_path = base.muiltyModel + model_name + "\\modflow.head"
    alldata = bf.HeadFile(head_path).get_alldata()
    # each stress period has 3 time steps; take the last one
    z = alldata[int(per + 1) * 3 - 1, int(layer), :, :]
    z[z <= 0] = 0
    return z
#读取base6文件读取初始水位
def get_init_flowZ(model_name, layer):
    """Initial head for *layer*, read from the model's bas6 STRT array."""
    workspace = base.model_dir + model_name
    model = flopy.modflow.Modflow.load(
        "modflow.nam", model_ws=workspace, exe_name="mf2005",
        verbose=True, version="mf2005", check=False)
    z = model.bas6.strt.__getitem__(layer).get_value()
    z[z <= 0] = 0
    # blank the inactive boundary cells of the validation model
    if model_name == base.not_allowed_model:
        mask = np.loadtxt(base.areapath, dtype=int)
        for r in range(len(mask)):
            for c in range(len(mask[r])):
                if mask[r][c] == -9999:
                    z[r][c] = 0
    return z
#获取流场图的打了等值线数组
def get_flow_levels(data):
    """Contour levels starting at 0.1, stepping by 10, capped at ceil(max)."""
    top = np.max(data)
    levels = [0.1]
    level = 10
    while level < top:
        levels.append(level)
        level += 10
    levels.append(math.ceil(top))
    return levels
#获取变幅比较小的等值线图
def get_flow_levels_small(min, max, line_count):
    """Roughly *line_count* evenly spaced contour levels between min and max.

    NOTE: the parameter names shadow the builtins; they are kept unchanged
    for caller compatibility.
    """
    step = round((max - min) / line_count, 0)
    if step <= 0:
        # degenerate span: fall back to a small fixed step
        step = 0.5
    levels = [min]
    current = min + step
    while current < max:
        levels.append(current)
        current += step
    levels.append(max)
    return levels
#获取变幅的Z值
def get_bf_z(model_name, per1, per2):
    """Layer-0 head change between two periods (per2 minus per1)."""
    head_path = base.model_dir + model_name + "\\modflow.head"
    multi_models = CalHead.muiltyModelList()
    if model_name == base.not_allowed_model:
        head_path = base.baseModel2 + "\\modflow.head"
    if model_name in multi_models:
        head_path = base.muiltyModel + model_name + "\\modflow.head"
    alldata = bf.HeadFile(head_path).get_alldata()
    # last time step of each period, layer 0
    start = alldata[int(per1 + 1) * 3 - 1, 0, :, :]
    end = alldata[int(per2 + 1) * 3 - 1, 0, :, :]
    return np.subtract(end, start)
#获取变幅的流场图
def getFlowFieldBF(model_name, per1, per2, outpath):
    """Render the head-change (amplitude) field between two periods.

    Draws the contour picture to *outpath* and returns a dict with the
    string-formatted levels, matching colors and the storage change.
    Fix: the locals were named ``min``/``max``, shadowing the builtins;
    renamed without changing behavior.
    """
    res = get_bf_z(model_name, per1, per2)
    # levels are computed from the range BEFORE masking inactive cells
    hi = np.max(res)
    lo = np.min(res)
    # mark inactive cells with 9999 so they fall outside all levels
    arr = np.loadtxt(base.areapath, dtype=int)
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] == -9999:
                res[i][j] = 9999.0
    levels = get_flow_levels_small(lo, hi, 10)
    draw_flow(res, levels, outpath, cm.RdBu_r)
    str_levs = [str(round(item, 2)) for item in levels]
    cols = get_gradient_color_list("RdBu_r", len(levels))
    dicts = {"levels": str_levs, "colors": cols}
    dicts["xbl"] = WaterXBL.get_grd_storage(model_name, int(per1), int(per2))
    return dicts
#获取埋深流场图
def getFlowFieldDepth(model_name, per, layer, title, flow_types, out_path):
    """Render depth to groundwater (surface elevation minus head)."""
    Z = get_flow_Z(model_name, per, layer)
    surface = np.loadtxt(base.dis_top_path, dtype=str)
    mask = np.loadtxt(base.areapath, dtype=int)
    for i in range(len(mask)):
        for j in range(len(mask[i])):
            if mask[i][j] == -9999:
                Z[i][j] = 0.0
            else:
                Z[i][j] = float(surface[i][j]) - Z[i][j]
    # fixed depth bands (meters)
    levels = [10, 30, 40, 60, 100, 200, 300, 400, 500, 700, 900]
    draw_flow(Z, levels, out_path, cm.RdBu_r)
    cols = get_gradient_color_list("RdBu_r", len(levels))
    return {"levels": levels, "colors": cols}
def getWaterResFiled(model_name, per):
    """Draw the layer-0 flow field for *per* into a timestamped PNG.

    Returns the generated picture's file name (not its full path).
    """
    pic = str(int(time.time())) + ".png"
    flow_field(model_name, per, 0, "flow", "online", base.flow_file + pic)
    return pic
#流场图
def flow_field(model_name, per, layer, title, flow_types, out_path):
    """Draw the head contour field for one period/layer.

    Returns a dict with the contour levels and their colors.
    """
    Z = get_flow_Z(model_name, per, layer)
    levels = get_flow_levels(Z)
    draw_flow(Z, levels, out_path, cm.RdBu)
    colors = get_gradient_color_list("RdBu", len(levels))
    return {"levels": levels, "colors": colors}
#初始流场图
def init_flow_field(model_name, layer, title, flow_types, out_path):
    """Draw the initial (starting-head) flow field.

    The validation model reads its start heads from the bas6 file; any
    other model draws period 0 of the model named by its "initHeader"
    setting.  Returns out_path, or "#" when no initial header exists.
    """
    if model_name == base.not_allowed_model:
        Z = get_init_flowZ(model_name, layer)
        draw_flow(Z, get_flow_levels(Z), out_path, cm.RdBu)
    else:
        pjson = CalHead.get_model_json(model_name)
        if "initHeader" not in pjson:
            return "#"
        init_head = pjson["initHeader"]
        print(init_head)
        Z = get_flow_Z(init_head, 0, 0)
        draw_flow(Z, get_flow_levels(Z), out_path, cm.RdBu)
    print(out_path)
    return out_path
 #绘制流场
def draw_flow(Z, levels, out_path, colorbar):
    """Contour-plot matrix *Z* and save a transparent PNG to *out_path*."""
    grid_x = np.arange(0, 114, 1)
    grid_y = np.arange(0, 104, 1)
    X, Y = np.meshgrid(grid_x, grid_y)
    # origin is the top-left corner, so flip the y axis
    plt.gca().invert_yaxis()
    # contour lines with labels
    contour = plt.contour(X, Y, Z, levels=levels, linewidths=0.5, colors='white')
    plt.clabel(contour, inline=2, fmt='%.2f', fontsize=4, colors='black')
    # filled bands between the contour lines
    plt.contourf(X, Y, Z, levels=levels, alpha=0.75, cmap=colorbar)
    plt.axis("off")
    plt.colorbar().ax.set_visible(False)
    plt.xticks([])
    plt.yticks([])
    plt.savefig(out_path, dpi=300, transparent=True, bbox_inches='tight')
    plt.close('all')
    return out_path
def get_gradient_color_list(m_color_name, m_num):
    """Sample *m_num* RGB tuples (0-255 ints) from the named colormap.

    Fix: the original scaled each channel by 256, which maps a channel
    value of 1.0 to 256 — outside the valid byte range — so the result
    is now clamped to 255.  All other values are unchanged.
    """
    color_map = plt.get_cmap(m_color_name, m_num)
    color_list = []
    for idx in range(m_num):
        rgba = color_map(idx)  # (r, g, b, a) floats in [0, 1]
        rgb = tuple(min(int(ch * 256), 255) for ch in rgba[:-1])
        color_list.append(rgb)
    return color_list
#导出模板
def exportReport(model_name, period, save_path=""):
    """Fill the monthly bulletin Word template and save it.

    Fix/generalization: *save_path* was hard-coded to "" (which makes
    ``DocxTemplate.save`` fail); it is now a keyword parameter with the
    same default, so existing callers are unaffected and new callers can
    supply a real destination.
    """
    tpl = DocxTemplate(base.prefix + '地下水发布信息模板.docx')
    cont = archive_report_content(model_name, period)
    tpl.render(cont)
    tpl.save(save_path)
#报告信息
def _shift_label(value, up, down):
    """Format a signed change as the up/down word plus the absolute value."""
    return (up if value >= 0 else down) + str(abs(value))

def archive_report_content(model_name, period):
    """Assemble the template context for the monthly archive report.

    Refactor: the original first stored raw values for the *_m_water /
    *_y_water / w_y_res keys and then unconditionally overwrote them in
    seven copy-pasted if/else blocks; the dead initial values and the
    duplication are replaced by the ``_shift_label`` helper.  The final
    dict contents are identical.
    """
    data1 = archive_grd_depth(model_name, period)
    data2 = archive_grd_res(model_name, period)
    # initial flow-field picture
    initpic = str(int(time.time())) + "1.png"
    outpath = base.flow_file + initpic
    flow_field(model_name, 0, 0, "初始流场信息", "online", outpath)
    # current-period flow-field picture
    currentpic = str(int(time.time())) + "2.png"
    outpath2 = base.flow_file + currentpic
    flow_field(model_name, int(period), 0, "流场信息", "online", outpath2)
    content = {
        "py_avg_water": str(data1[0]),
        "py_m_water": _shift_label(data1[1], "回升", "下降"),
        "py_y_water": _shift_label(data1[2], "回升", "下降"),
        "sq_avg_water": str(data1[3]),
        "sq_m_water": _shift_label(data1[4], "回升", "下降"),
        "sq_y_water": _shift_label(data1[5], "回升", "下降"),
        "yq_avg_water": str(data1[6]),
        "yq_m_water": _shift_label(data1[7], "回升", "下降"),
        "yq_y_water": _shift_label(data1[8], "回升", "下降"),
        "w_m_res": str(data2[0]),
        "w_y_res": _shift_label(data2[1], "增加", "减少"),
        "flow1": "/xishan/xinshanFlow/" + initpic,
        "flow2": "/xishan/xinshanFlow/" + currentpic,
    }
    return content
    #水资源量
def archive_grd_res(model_name, period):
    """Monthly water-resource total and its change from the previous month.

    Runs zonebudget first when the model has no water_res.txt yet.
    Returns ``[rounded monthly total, rounded difference vs previous]``.
    Fix: the original computed ``int(period - 1)``, which raises
    TypeError when *period* arrives as a string; ``int(period) - 1``
    behaves identically for ints and also accepts numeric strings.
    """
    txt_path = base.model_dir + model_name + "\\water_res.txt"
    if not os.path.exists(txt_path):
        CalHead.run_zonebudget_res(model_name)
    per = int(period)
    monthdata = CalHead.water_res_month(model_name, txt_path, per)
    if per > 0:
        monthdata2 = CalHead.water_res_month(model_name, txt_path, per - 1)
    else:
        monthdata2 = monthdata
    # total = sum of the three inflow terms minus the outflow term
    water1 = round(monthdata[0] + monthdata[1] + monthdata[2] - monthdata[3], 4)
    water2 = monthdata2[0] + monthdata2[1] + monthdata2[2] - monthdata2[3]
    return [water1, round(water1 - water2, 4)]
#生成月度报告的内容地下水深
def _sum_region(cells, current, previous, depth):
    """Accumulate per-region sums: (depth_sum, cur_sum, prev_sum, count).

    Only cells with a positive current head add to the depth/current sums
    and the count; the previous sum follows the original rule of counting
    cells whose previous head is positive.
    """
    depth_sum, cur_sum, prev_sum, count = 0, 0, 0, 0
    for i, j in cells:
        if current[i, j] > 0:
            depth_sum += depth[i, j]
            cur_sum += current[i, j]
            count += 1
        if previous[i, j] > 0:
            prev_sum += previous[i, j]
    return depth_sum, cur_sum, prev_sum, count

def archive_grd_depth(model_name, period):
    """Average depth and month-on-month head change for the three regions.

    Returns [plain_depth, plain_change, 0, hill_depth, hill_change, 0,
    yuquanshan_depth, yuquanshan_change, 0], rounded to 2 decimals.
    Refactor: the three identical accumulation loops now share the
    ``_sum_region`` helper; the arithmetic (including dividing the
    previous-month sum by the CURRENT-month positive-cell count — an
    original quirk) is unchanged.
    """
    model_dir = base.model_dir + model_name + "\\modflow.head"
    alldata = bf.HeadFile(model_dir).get_alldata()
    per = int(period)
    current_month_data = alldata[(per + 1) * 3 - 1, 0, :, :]
    if per > 0:
        pre_month_data = alldata[per * 3 - 1, 0, :, :]
    else:
        pre_month_data = current_month_data
    mpdict = getMPDict()
    yqs_dict = getYQSDict()
    # depth to groundwater = surface elevation minus head
    gc_array = np.array(getTopDis())
    depth_array = np.subtract(gc_array, current_month_data)
    py = _sum_region(mpdict["1"], current_month_data, pre_month_data, depth_array)
    sq = _sum_region(mpdict["2"], current_month_data, pre_month_data, depth_array)
    yqs = _sum_region(yqs_dict["1"], current_month_data, pre_month_data, depth_array)
    k, m, n = py[3], sq[3], yqs[3]
    result = [py[0] / k, py[1] / k - py[2] / k, 0,
              sq[0] / m, sq[1] / m - sq[2] / m, 0,
              yqs[0] / n, yqs[1] / n - yqs[2] / n, 0]
    return [round(v, 2) for v in result]
#获取研究区域 山区 平原区 玉泉山区的折线图
def _region_month_avg(cells, month_data):
    """Average head over a region's cells, ignoring non-positive heads."""
    total, count = 0.0, 0
    for i, j in cells:
        if month_data[i, j] > 0:
            total += month_data[i, j]
            count += 1
    return round(total / count, 2)

def getXs3LineChart(paths):
    """Monthly average-head series for the plain, hill and Yuquanshan regions.

    Refactor: the three copy-pasted per-region loops now share the
    ``_region_month_avg`` helper; the returned dict is unchanged.
    """
    alldata = bf.HeadFile(paths).get_alldata()
    months = int(len(alldata) / 3)
    mpdict = getMPDict()
    yqs_dict = getYQSDict()
    plain_cells = mpdict["1"]
    hill_cells = mpdict["2"]
    yqs_cells = yqs_dict["1"]
    plain_series, hill_series, yqs_series = [], [], []
    for per in range(months):
        # last time step of each stress period, layer 0
        month_data = alldata[(per + 1) * 3 - 1, 0, :, :]
        plain_series.append(_region_month_avg(plain_cells, month_data))
        hill_series.append(_region_month_avg(hill_cells, month_data))
        yqs_series.append(_region_month_avg(yqs_cells, month_data))
    return {"pyq": plain_series, "sq": hill_series, "yqs": yqs_series}
#山区/平原区的字典
#平原区=1  山区=2
def getMPDict():
    """Group cell coordinates by hill/plain zone id (plain=1, hill=2).

    Reads the zoning matrix and returns {zone-id-string: [(row, col), ...]},
    skipping inactive (-9999) cells.  Fix: the original named its local
    ``dict``, shadowing the builtin; the if/else insert is replaced by
    ``setdefault`` with identical results.
    """
    arr = np.loadtxt(base.xs_mp_path, dtype=int)
    groups = {}
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] == -9999:
                continue
            groups.setdefault(str(arr[i][j]), []).append((i, j))
    return groups
#玉泉山的字典
#玉泉山 = 1
def getYQSDict():
    """Group cell coordinates of the Yuquanshan zone (value == 1).

    Returns {"1": [(row, col), ...]}.  Fix: the original named its local
    ``dict``, shadowing the builtin; ``setdefault`` replaces the manual
    insert with identical results.
    """
    arr = np.loadtxt(base.yqs_path, dtype=int)
    groups = {}
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] != 1:
                continue
            groups.setdefault(str(arr[i][j]), []).append((i, j))
    return groups
#获取高程
def getTopDis():
    """Load the surface-elevation matrix as a float array."""
    raw = np.loadtxt(base.dis_top_path, dtype=str)
    return np.array(raw).astype("float")
Base-dev.py
New file
@@ -0,0 +1,173 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 20 16:15:23 2023
@author: ZMK
"""
import numpy as np
import shutil
import os
# Directory where generated flow-field images are written
flow_file ="D:\\javaCode\\xishan\\xishan\\xishan\\xinshanFlow\\"
# Root of all model data (dev machine path)
prefix ='C:\\Users\\86134\\Desktop\\xsModel2\\'
ZoneBudget64Exe= prefix + "zonebuget\\ZoneBudget64.exe"
# NOTE(review): the trailing "\n" appears to be embedded on purpose —
# presumably these strings are written to ZoneBudget's stdin; confirm
# before normalizing the paths.
water_bal_zones = prefix +"zonebuget\\water_bal.zones\n"
water_res_zones = prefix +"zonebuget\\water_res.zones\n"
water_lsh_path = prefix + "water_lsh.ini"
water_yhy_path = prefix + "water_yhy.ini"
water_dbw_path = prefix + "water_dbw.ini"
# Model working directories
baseModel = prefix + 'verifyModel\\'
baseModel2 = prefix + 'verifyModel2\\'
predictModel= prefix + 'predictModel\\'
predictParamModel= prefix + 'predictParamModel\\'
muiltyModel =  prefix + 'muiltyModel\\'
model_dir = prefix + '0612Model\\'
# Observation wells ("监测井") and their recorded data
obswellpath = prefix + '监测井.ini'
obswell_data_path=  prefix + 'water_obs_data.ini'
obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8')
# Districts ("区县")
district_path = prefix +"区县.ini"
district= np.loadtxt(district_path, dtype=str,encoding='utf-8')
# Pumping wells ("抽水井")
pumpwellpath = prefix +'抽水井.ini'
pump_well = np.loadtxt(pumpwellpath, dtype=str,encoding='utf-8')
period_path = prefix  +"period.json"
# Zoning matrix ("分区")
areapath = prefix + '分区.ini'
area_array = np.loadtxt(areapath, dtype=str,encoding='utf-8')
# Water-balance data paths
water_equ_path = prefix + 'water_equ.ini'
water_equ = np.loadtxt(water_equ_path, dtype=str,encoding='utf-8')
water_equ_path2022 = prefix + 'water_equ2022.ini'
water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8')
# Surface elevation data
dis_top_path = prefix + 'md_dis_top.ini'
# Per-zone storage coefficients
lpf_path =  prefix + 'md_lpf.ini'
md_lpf =  np.loadtxt(lpf_path, dtype=str,encoding='utf-8')
# Yuquanshan spring zoning matrix
yqs_path=  prefix + '玉泉山泉划分.ini'
xs_yqs_matrix =  np.loadtxt(yqs_path, dtype=str,encoding='utf-8')
# Hill / plain zoning matrix
xs_mp_path = prefix + '山区平原区划分.ini'
xs_mp_matrix =  np.loadtxt(xs_mp_path, dtype=str,encoding='utf-8')
# External 3D visualization program configuration and outputs
model_config ='D:\\javaCode\\xishan\\objclipdig\\ModelFlow_xishan\\config.ini'
model3d_path='D:/javaCode/xishan/xishan/xishan/output2/'
modeldata_csv_path ="C:/Users/86134/Desktop/xsModel2/0612Model/"
exe_path = 'D:/javaCode/xishan/objclipdig/ModelFlow_xishan/ModelFlow_xishan.exe'
# Launch the external visualization exe (comment was mojibake for
# "invoke the exe program")
def callModelexe():
    # NOTE(review): os.system blocks until the exe exits and its return
    # code is ignored — presumably fire-and-forget; confirm with callers.
    os.system(exe_path)
#更新模型的exe配置
def updateModelConfig(model_name):
    """Point the exe config's outpath/csvpath lines at *model_name*.

    Rewrites lines 1 and 2 of the config file in place.
    """
    conf = np.loadtxt(model_config, dtype=str, encoding='utf-8')
    conf[1] = "outpath=" + model3d_path + model_name
    conf[2] = "csvpath=" + modeldata_csv_path + model_name + "/output"
    np.savetxt(model_config, conf, newline='\n', fmt='%s', encoding='utf-8')
def getPumpWellName(row, column):
    """Look up a pump well's name by (row, column); "NONE" when absent."""
    for record in pump_well:
        # record layout: index, row, col, id, district, name
        if record[1] == row and record[2] == column:
            return record[5]
    return "NONE"
#获取矩阵分组的字典结构
def getAreas():
    """Group active cells of the partition matrix by zone-id string.

    Returns {zone-id-string: [(row, col), ...]}, skipping -9999 cells.
    Fix: the original named its local ``dict``, shadowing the builtin;
    ``setdefault`` replaces the manual insert with identical results.
    """
    arr = np.loadtxt(areapath, dtype=int)
    groups = {}
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] == -9999:
                continue
            groups.setdefault(str(arr[i][j]), []).append((i, j))
    return groups
def getAreaDictFirstIndex():
    """Map each zone id (int) to a one-element list of its first (row, col)."""
    arr = np.loadtxt(areapath, dtype=int)
    first_cells = {}
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            zone = arr[i][j]
            if zone == -9999 or zone in first_cells:
                continue
            first_cells[zone] = [(i, j)]
    return first_cells
#获取分组小标字典数据
def getAreaDictIndexArray():
    """Group linear cell ids (via getCellIdByRC) by zone-id string.

    Refactor: the duplicated first-insert/append branches collapse into
    one ``setdefault`` call; the produced dict is identical.
    """
    arr = np.loadtxt(areapath, dtype=int)
    dict_array = {}
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] == -9999:
                continue
            # getCellIdByRC expects 1-based coordinates
            cell_id = getCellIdByRC(i + 1, j + 1)
            dict_array.setdefault(str(arr[i][j]), []).append(cell_id)
    return dict_array
def getCellIdByRC(rowVal, columnVal):
    """Zero-based linear cell id for 1-based (row, column) on the 114-wide grid."""
    # fix: dropped the stray trailing semicolon
    return (rowVal - 1) * 114 + columnVal - 1
Base.py
@@ -6,21 +6,47 @@
"""
import numpy as np
import shutil
import os
#成果模型的默认名字
not_allowed_model="202001_202212"
archive_models=["SP0-0","SP1-1","SP1-2","SP1-3","SP2-1","SP2-2","SP2-3","SP3-1",
            "SP3-2","SP3-4","SP3-5","SP3-6","SP3-7","SP4-1","SP4-7"]
# ç”Ÿæˆçš„æµåœºå›¾æ–‡ä»¶
flow_file ="D:\\javaCode\\xishan\\xishan\\xishan\\xinshanFlow\\"
prefix ='C:\\Users\\ZMK\\Desktop\\xsModel2\\'
ZoneBudget64Exe= prefix + "zonebuget\\ZoneBudget64.exe"
water_bal_zones = prefix +"zonebuget\\water_bal.zones\n"
water_res_zones = prefix +"zonebuget\\water_res.zones\n"
water_lsh_path = prefix + "water_lsh.ini"
water_yhy_path = prefix + "water_yhy.ini"
water_dbw_path = prefix + "water_dbw.ini"
baseModel = prefix + 'verifyModel\\'
baseModel2 = prefix + 'verifyModel2\\'
predictModel= prefix + 'predictModel\\'
predictModel60 = prefix + 'predictModel60\\'
predictParamModel= prefix + 'predictParamModel\\'
muiltyModel =  prefix + 'muiltyModel\\'
model_dir = prefix + '0612Model\\'
obswellpath = prefix + '监测井.ini'
obswell_data_path=  prefix + 'water_obs_data.ini'
well_scale_path =  prefix + 'well_scale.ini'
obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8')
@@ -44,6 +70,21 @@
water_equ_path2022 = prefix + 'water_equ2022.ini'
water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8')
#地表高程数据
dis_top_path = prefix + 'md_dis_top.ini'
#分区的储水系数
lpf_path =  prefix + 'md_lpf.ini'
md_lpf =  np.loadtxt(lpf_path, dtype=str,encoding='utf-8')
# #玉泉山矩阵数据
yqs_path=  prefix + '玉泉山泉划分.ini'
xs_yqs_matrix =  np.loadtxt(yqs_path, dtype=str,encoding='utf-8')
# #山区平原区矩阵
xs_mp_path = prefix + '山区平原区划分.ini'
xs_mp_matrix =  np.loadtxt(xs_mp_path, dtype=str,encoding='utf-8')
             
model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini'
BigData.py
New file
@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 16:12:55 2023
@author: ZMK
"""
def mergeWaterData(balArray, resArray):
    """Build the four pie-chart payloads for the dashboard.

    balArray: 7 water-balance terms (4 inflow, 3 outflow).
    resArray: 4 water-resource terms (3 inflow, 1 outflow).
    Fix: the original assigned "pie1"/"pie2" twice with the same values;
    the redundant first assignment was removed (the result is unchanged).
    """
    inarray = [
        {"name": "降水入渗量", "value": balArray[0]},
        {"name": "河流入渗量", "value": balArray[1]},
        {"name": "L1侧向补给量", "value": balArray[2]},
        {"name": "L3侧向补给量", "value": balArray[3]},
    ]
    outarray = [
        {"name": "人工开采量", "value": balArray[4]},
        {"name": "L1侧向流出量", "value": balArray[5]},
        {"name": "L3侧向流出量", "value": balArray[6]},
    ]
    inarray2 = [
        {"name": "大气降水", "value": resArray[0]},
        {"name": "永定河渗漏", "value": resArray[1]},
        {"name": "侧向流入", "value": resArray[2]},
    ]
    outarray2 = [{"name": "侧向流出", "value": resArray[3]}]
    return {"pie1": inarray, "pie2": outarray,
            "pie3": inarray2, "pie4": outarray2}
CalHead.py
@@ -5,15 +5,51 @@
@author: ZMK
"""
import flopy
import flopy.utils.binaryfile as bf
import csv
import Base as base
import os
import json
import subprocess
import re
import ModelPeriod
import numpy as np
import AchiveReport
#添加modle
def addModelJson(model_name, start_time, end_time, remark):
    """Append a model entry to model_list.json and persist the file."""
    list_path = base.prefix + "\\model_list.json"
    with open(list_path, encoding='utf-8') as f:
        models = json.load(f)
    entries = list(models)
    entries.append({"model_name": model_name,
                    "start_time": start_time,
                    "end_time": end_time,
                    "remark": remark})
    with open(list_path, "w", encoding='utf-8') as outfile:
        json.dump(entries, outfile, ensure_ascii=False)
    return "保存完毕!"
#删除model
def removeModelJson(model_name):
    """Remove the entry named *model_name* from model_list.json and persist."""
    list_path = base.prefix + "\\model_list.json"
    with open(list_path, encoding='utf-8') as f:
        models = json.load(f)
    kept = [item for item in models if item["model_name"] != model_name]
    with open(list_path, "w", encoding='utf-8') as outfile:
        json.dump(kept, outfile, ensure_ascii=False)
    return "删除模型完毕!"
def get_model_json(model_name):
@@ -21,62 +57,77 @@
      prediction_path = base.model_dir + model_name +"\\prediction.json"
      with open(prediction_path,encoding='utf-8') as f:
             period_json = json.load(f)  
      return period_json; 
  
def get_model_period(model_name):
     period_json=""
     prediction_path = base.model_dir + model_name +"\\prediction.json"
     with open(prediction_path,encoding='utf-8') as f:
             period_json = json.load(f)
     period_json= get_model_json(model_name)
     start_time = period_json["start_time"]
     end_time = period_json["end_time"]
     months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
     return months; 
    
def is_cloned_model(model_name):
    """True when the model lacks its own water_bal.txt (i.e. a fresh clone)."""
    return not os.path.exists(base.model_dir + model_name + "\\water_bal.txt")
#观测井chart
def obsChartdata(model_name, row, column):
def obsChartdata(wellId,model_name, row, column):
    
      row = int(row)-1
      column = int(column)-1
      dir=""
      period_json= get_model_json(model_name)
      start_time = period_json["start_time"]
      end_time = period_json["end_time"]
      xmonths = ModelPeriod.get_months_in_range_ym(start_time, end_time)
      if model_name == base.not_allowed_model:
          dir = base.baseModel2  + "\\modflow.head"
      else:
      dir = base.model_dir + model_name + "\\modflow.head"
          cloned = is_cloned_model(model_name)
          if cloned ==True:
              return  {"y_data": [],"y_data2":[],"x_data":xmonths }
      head = bf.HeadFile(dir)
      alldata = head.get_alldata()
      period = len(alldata)
      layer = 3
      xdata = []
      #周期数
      months = int(len(alldata)/3)
      ydata = []
      result = {}
      for per in range(period):
           for lay in range(layer):
               if per % 3 == 0 and lay == 0:
                   md = (int)(lay / 3 + 1)
                   per_array = alldata[per][lay]
      for month in range(months):
          z1= alldata[int(month+1)*3-1,0,:,:]
          cell_data = float(z1[row][column])
          ydata.append(round(cell_data,2))
                   cell_data = (float)(per_array[row][column])
                   ydata.append(cell_data)
      y_data2=[]
      if model_name == base.not_allowed_model:
          array_data =  np.loadtxt(base.obswell_data_path, dtype=str,encoding='utf-8')
          y_data2= getObsData(wellId,array_data)
    
      period_json= get_model_json(model_name)
      start_time = period_json["start_time"]
      end_time = period_json["end_time"]
      months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
      result = {"y_data": ydata, "x_data": months}
      result = {"y_data": ydata,"y_data2":y_data2,"x_data": xmonths}
      return result
def getRowCloumnById(index_id):
    row = 104
    column =114
    count=0
def getObsData(wellId, array_data):
    """Monthly means (groups of 3 samples) of the observations for *wellId*.

    Each row of array_data carries the well id at index 0 and the string
    value at index 3; the sample count is assumed to be a multiple of 3.
    """
    samples = [row[3] for row in array_data if row[0] == wellId]
    monthly = []
    for start in range(0, len(samples), 3):
        mean = (float(samples[start]) + float(samples[start + 1])
                + float(samples[start + 2])) / 3
        monthly.append(round(mean, 2))
    return monthly
def getRowCloumnById(index_id):
    row,column,count = 104,114,0
    for  i in range(row):
        for j in range(column):
            if index_id == count:
@@ -90,113 +141,36 @@
def earthWaterChart(model_name, index_id):
    """Layer-0 head time series for the cell with linear id *index_id*."""
    row_column = getRowCloumnById(index_id)
    row, column = row_column[0], row_column[1]
    head_path = base.model_dir + model_name + "\\modflow.head"
    alldata = bf.HeadFile(head_path).get_alldata()
    # one sample per stress period: time-step index divisible by 3, layer 0
    ydata = []
    for per in range(len(alldata)):
        if per % 3 == 0:
            ydata.append(float(alldata[per][0][row][column]))
    period_json = get_model_json(model_name)
    months = ModelPeriod.get_months_in_range_ym(period_json["start_time"],
                                                period_json["end_time"])
    return {"y_data": ydata, "x_data": months}
def heatmapdata(model_name,period):
      xmonths = ModelPeriod.get_months_in_range_ym(start_time, end_time)
      dir = ""
      if model_name == base.not_allowed_model:
          dir = base.baseModel2  + "\\modflow.head"
      else:
    dir = base.model_dir + model_name + "\\modflow.head"
          cloned = is_cloned_model(model_name)
          if cloned ==True:
              return  {"y_data": [],"x_data":xmonths }
      
    head = bf.HeadFile(dir)
    alldata = head.get_alldata()
    
    index = int(period)*3
    return alldata[index][0]
      #周期数
      months = int(len(alldata)/3)
      ydata= []
      result = {}
      for month in range(months):
          z1= alldata[int(month+1)*3-1,0,:,:]
          cell_data = float(z1[row][column])
          ydata.append(round(cell_data,2))
#水均衡计算
def waterEqu(model_name):
    if model_name == '202001_202212':
        water_equ_path = base.prefix + "\\water_equ.json"
        with open(water_equ_path,encoding='utf-8') as f:
             data = json.load(f)
             return data
    else:
        year = model_name[0:4]
        title =[year]
        dict ={"title":title}
        celldata = np.array(base.water_equ2022).tolist()
        predict_json= get_model_json(model_name)
        a1=float(celldata[0])
        a2=float(celldata[1])
        a3=float(celldata[2])
        a4=float(celldata[3])
        b1=float(celldata[4])
        b2=float(celldata[5])
        b3=float(celldata[6])
        if predict_json["rain"]:
            a1= float(predict_json["rain"]["ratio"]) * float(celldata[0])
            a3= float(predict_json["rain"]["ratio"]) * float(celldata[2])
            a4= float(predict_json["rain"]["ratio"]) * float(celldata[3])
            b2= float(predict_json["rain"]["ratio"]) * float(celldata[5])
            b3= float(predict_json["rain"]["ratio"]) * float(celldata[6])
        if predict_json["river"]:
            a2=  float(predict_json["river"]["ratio"]) * float(celldata[1])
        if predict_json["mine"]:
            b1=  b1
        in_data= a1+a2+a3+a4
        out_data= b1 +b2 + b3
        float_data=[a1,a2,a3,a4,in_data,b1,b2,b3,out_data,in_data-out_data]
        inarray=[]
        inarray.append({"name":"降水入渗量","value":a1})
        inarray.append({"name":"河流入渗量","value":a2})
        inarray.append({"name":"L1侧向补给量","value":a3})
        inarray.append({"name":"L3侧向补给量","value":a4})
        outarray=[]
        outarray.append({"name":"人工开采量","value":b1})
        outarray.append({"name":"L1侧向流出量","value":b2})
        outarray.append({"name":"L3侧向流出量","value":b3})
        pie1={str(year):inarray}
        pie2={str(year):outarray}
        dict["pie1"]=pie1
        dict["pie2"]=pie2
        array2d=[]
        array2d.append([str(year)])
        for i in range(len(float_data)):
            tmp=[]
            tmp.append(str(float_data[i]))
            array2d.append(tmp)
        dict["data"]=array2d
        return dict
      result = {"y_data": ydata, "x_data": xmonths}
      return result
        
#导出csv文件
@@ -207,26 +181,652 @@
    if not os.path.exists(out_path):
         os.mkdir(out_path)  
    
    # z1= alldata[int(per1)*3,0,:,:]
    head = bf.HeadFile(dir)
    alldata = head.get_alldata()
    month = len(alldata)
    layer = 3
    months = int(len(alldata)/3)
    layers = 3
    #例如 0-36月
    for month in range(months):
        for layer in range (layers):
            z_last= alldata[(month+1)*3-1,layer,:,:]
    for i in range(month):
       for j in range(layer):
          if i % 3 == 0:
            md = (int)(i / 3 + 1)
            filename = out_path + str(md) + '-' + str(j+1) + '.csv'
            filename = out_path + str(month+1) + '-' + str(layer+1) + '.csv'
            f = open(filename, 'w', newline='')
            writer = csv.writer(f)
            for p in alldata[i][j]:
            for p in z_last:
               writer.writerow(p)
            f.close()
    return out_path
 
#水均衡计算
def waterEqu(model_name):
    """Water-balance summary for one model.

    The protected base model returns a pre-built JSON file; any other model
    is computed from its ZoneBudget listing (water_bal.txt) via water_balance.
    Returns a dict with title, period, two pie-chart series and a data table.
    """
    if model_name == base.not_allowed_model:
        water_equ_path = base.prefix + "\\water_equ.json"
        with open(water_equ_path, encoding='utf-8') as f:
            return json.load(f)

    jx = get_model_json(model_name)
    result = {"title": [model_name],
              "start_time": jx["start_time"],
              "end_time": jx["end_time"]}

    wat = water_balance(model_name, base.model_dir + model_name + "\\water_bal.txt")
    total_in = round(wat[0] + wat[1] + wat[2] + wat[3], 4)
    total_out = round(wat[4] + wat[5] + wat[6], 4)

    in_names = ["降水入渗量", "河流入渗量", "一层侧向补给量", "三层侧向补给量"]
    out_names = ["人工开采量", "一层侧向流出量", "三层侧向流出量"]
    result["pie1"] = {str(model_name): [{"name": n, "value": v}
                                        for n, v in zip(in_names, wat[0:4])]}
    result["pie2"] = {str(model_name): [{"name": n, "value": v}
                                        for n, v in zip(out_names, wat[4:7])]}

    # table: header row, then one row per value (inputs, total in, outputs,
    # total out, net balance), all stringified
    cells = [wat[0], wat[1], wat[2], wat[3], total_in,
             wat[4], wat[5], wat[6], total_out,
             round(total_in - total_out, 4)]
    result["data"] = [["数据(亿立方米)"]] + [[str(v)] for v in cells]
    return result
   
def run_zonebudget_bal(model_name):
    """Run the ZoneBudget executable to produce water_bal.txt for *model_name*.

    ZoneBudget is interactive: it reads its configuration from stdin, so the
    answers are piped in the order it prompts for them.
    """
    exe_path = base.ZoneBudget64Exe
    answers = [
        base.model_dir + model_name + "\\water_bal.txt\n",   # output listing
        base.model_dir + model_name + "\\modflow.flow\n",    # cell-by-cell budget
        "title\n",                                           # run title
        base.water_bal_zones,                                # zone definition file
        "A\n",                                               # budgets for all periods
    ]
    process = subprocess.Popen([exe_path], stdin=subprocess.PIPE, shell=True)
    for answer in answers:
        process.stdin.write(answer.encode())
    output, _ = process.communicate()
    print(output)
def run_zonebudget_res(model_name):
    """Run the ZoneBudget executable to produce water_res.txt for *model_name*.

    Same interactive protocol as run_zonebudget_bal, but with the
    water-resources zone file.
    """
    exe_path = base.ZoneBudget64Exe
    answers = [
        base.model_dir + model_name + "\\water_res.txt\n",   # output listing
        base.model_dir + model_name + "\\modflow.flow\n",    # cell-by-cell budget
        "title\n",                                           # run title
        base.water_res_zones,                                # zone definition file
        "A\n",                                               # budgets for all periods
    ]
    process = subprocess.Popen([exe_path], stdin=subprocess.PIPE, shell=True)
    for answer in answers:
        process.stdin.write(answer.encode())
    output, _ = process.communicate()
    print(output)
def reg_find_int(text):
    """Return every run of decimal digits in *text*, as strings, left to right."""
    return re.findall(r'\d+', text)
def read_txt(path):
    """Read the text file at *path* and return its lines (newlines kept)."""
    with open(path, 'r') as file:
        return file.readlines()
#解析水均衡数据
def water_balance(model_name,paths):
    """Parse a ZoneBudget listing (*paths*) into seven water-balance terms.

    Returns, in order: [recharge, river seepage (zone 3), L1 lateral inflow
    (zone 4), L3 lateral inflow (zone 8), pumping (zone 5), L1 lateral
    outflow (zone 4), L3 lateral outflow (always 0)], each scaled to
    1e8 m^3 in the final loop (one listing step = 10 days).
    """
    data= read_txt(paths)
    lens = len(data)
    index = 0
    segment=[]
    dicts={}
    flag = 0
    title=""
    # Split the listing into segments: one per "Flow Budget for Zone ..."
    # header, terminated by the "Percent Discrepancy" line.  Each segment is
    # keyed by the integers found in its header, comma-joined
    # (presumably zone, stress period, time step — confirm format).
    while index < lens:
        strs = data[index].strip()
        if strs.startswith("Flow Budget for Zone"):
            segment=[]
            flag = 1
            title=strs
        if strs.startswith("Percent Discrepancy"):
            segment.append(strs)
            numbers = reg_find_int(title)
            key = ','.join(numbers)
            dicts[key]=segment
            flag = 0
        if flag ==1 :
            segment.append(strs)
        index=index+1
    # Recharge: (IN - OUT) RECHARGE lines summed over every segment;
    # assumes each segment has exactly two RECHARGE lines (IN then OUT).
    recharge = 0
    for key in dicts:
        array = dicts[key]
        temp=[]
        for item in array:
            if item.startswith("RECHARGE") :
                strs = item.replace(" ", "").replace("RECHARGE=", "")
                temp.append(float(strs))
        recharge += (temp[0]-temp[1])
    # River seepage (positive): WELLS (IN - OUT) summed over all zone-3 segments.
    zone3 = 0
    for key in dicts:
        if key.startswith("3,"):
            array = dicts[key]
            temp=[]
            for item in array:
                 if item.startswith("WELLS") :
                     strs = item.replace(" ", "").replace("WELLS=", "")
                     temp.append(float(strs))
            zone3 += (temp[0]-temp[1])
    # L1 lateral inflow (positive): first WELLS line (the IN entry) of every
    # zone-4 segment — the break stops before the OUT entry.
    Zone4_in_well=0
    for key in dicts:
        if key.startswith("4,"):
            array = dicts[key]
            for item in array:
                 if item.startswith("WELLS") :
                     strs = item.replace(" ", "").replace("WELLS=", "")
                     data = float(strs)
                     Zone4_in_well +=data
                     break
    # L3 lateral inflow (positive): first WELLS line of every zone-8 segment.
    Zone8_in_well =0
    for key in dicts:
        if key.startswith("8,"):
            array = dicts[key]
            for item in array:
                 if item.startswith("WELLS") :
                     strs = item.replace(" ", "").replace("WELLS=", "")
                     data = float(strs)
                     Zone8_in_well +=data
                     break
    # Pumping (negative): every WELLS line of every zone-5 segment.
    # NOTE(review): unlike the inflow loops above there is no break, so this
    # sums BOTH the IN and OUT WELLS entries — correct only if the IN entry
    # is always 0 for zone 5; confirm against the listing.
    zone5out =0
    for key in dicts:
        if key.startswith("5,"):
            array = dicts[key]
            for item in array:
                 if item.startswith("WELLS") :
                     strs = item.replace(" ", "").replace("WELLS=", "")
                     data = float(strs)
                     zone5out +=data
      # L1 lateral outflow (negative): every WELLS line of every zone-4 segment.
      # NOTE(review): also sums both IN and OUT WELLS entries (no break) even
      # though the IN entry was already counted in Zone4_in_well — verify.
    Zone4_out_well=0
    for key in dicts:
        if key.startswith("4,"):
            array = dicts[key]
            for item in array:
                 if item.startswith("WELLS") :
                     strs = item.replace(" ", "").replace("WELLS=", "")
                     data = float(strs)
                     Zone4_out_well +=data
    # L3 lateral outflow (negative) is fixed at zero.
    L3=0.0
    result =[recharge,zone3,Zone4_in_well,Zone8_in_well,zone5out,Zone4_out_well,L3]
    for i in range(len(result)):
        # each row is one step = 10 days; convert to 1e8 m^3
        result[i]= round(result[i]/100000000*10, 4)
    return result
#解析水资源量数据
def water_res(model_name,paths):
    """Parse a ZoneBudget listing (*paths*) into four water-resource terms.

    Returns [precipitation recharge (zone 1), Yongding-River seepage
    (zone 2 <-> 1 exchange), lateral inflow (zone 8 <-> 7), leakage discharge
    (zone 7 <-> 6)], each scaled to 1e8 m^3 (one step = 10 days).
    """
    data= read_txt(paths)
    lens = len(data)
    index = 0
    segment=[]
    dicts={}
    flag = 0
    title=""
    # Same segmentation as water_balance: split on "Flow Budget for Zone ..."
    # headers, key each segment by the comma-joined integers of its title.
    while index < lens:
        strs = data[index].strip()
        if strs.startswith("Flow Budget for Zone"):
            segment=[]
            flag = 1
            title=strs
        if strs.startswith("Percent Discrepancy"):
            segment.append(strs)
            numbers = reg_find_int(title)
            key = ','.join(numbers)
            dicts[key]=segment
            flag = 0
        if flag ==1 :
            segment.append(strs)
        index=index+1
    # Precipitation: (IN - OUT) RECHARGE lines over all zone-1 segments;
    # assumes exactly two RECHARGE lines per segment (IN then OUT).
    zone1_rechage = 0
    for key in dicts:
       if key.startswith("1,"):
          array = dicts[key]
          temp=[]
          for item in array:
              if item.startswith("RECHARGE") :
                  strs = item.replace(" ", "").replace("RECHARGE=", "")
                  temp.append(float(strs))
          zone1_rechage += (temp[0]-temp[1])
    # Yongding-River seepage: net "Zone 2 to 1" minus "Zone 1 to 2" exchange
    # over all zone-1 segments.
    zone1_well = 0
    for key in dicts:
        if key.startswith("1,"):
            array = dicts[key]
            indata,outdata= 0,0
            for item in array:
                 if item.startswith("Zone   2 to   1") :
                     strs = item.replace(" ", "").replace("Zone2to1=", "")
                     indata = float(strs)
                 if item.startswith("Zone   1 to   2") :
                     strs = item.replace(" ", "").replace("Zone1to2=", "")
                     outdata = float(strs)
            zone1_well += (indata-outdata)
    # Lateral inflow: net zone 8 <-> 7 exchange over all zone-7 segments.
    zone7=0
    for key in dicts:
        if key.startswith("7,"):
            array = dicts[key]
            indata,outdata= 0,0
            for item in array:
                 if item.startswith("Zone   8 to   7") :
                     strs = item.replace(" ", "").replace("Zone8to7=", "")
                     indata = float(strs)
                 if item.startswith("Zone   7 to   8") :
                     strs = item.replace(" ", "").replace("Zone7to8=", "")
                     outdata = float(strs)
            zone7 += (indata-outdata)
    # Leakage discharge: net zone 7 <-> 6 exchange over all zone-6 segments.
    zone6 =0
    for key in dicts:
        if key.startswith("6,"):
            array = dicts[key]
            indata,outdata= 0,0
            for item in array:
                 if item.startswith("Zone   7 to   6") :
                     strs = item.replace(" ", "").replace("Zone7to6=", "")
                     indata = float(strs)
                 if item.startswith("Zone   6 to   7") :
                     strs = item.replace(" ", "").replace("Zone6to7=", "")
                     outdata = float(strs)
            zone6 += (indata-outdata)
    result =[zone1_rechage,zone1_well,zone7,zone6]
    for i in range(len(result)):
        # one step = 10 days; convert to 1e8 m^3
        result[i]= round(result[i]/100000000*10,4)
    return result
 #解析水资源量数据
def water_res_month(model_name,paths,per):
    """Like water_res, but restricted to stress period *per* (0-based).

    Instead of scanning every segment, the four terms are read from the
    three fixed keys "<zone>,<k>,<per+1>" for k in 1..3 — presumably the
    three time steps of the stress period; confirm the key layout matches
    reg_find_int's output for the segment titles.
    """
    data= read_txt(paths)
    lens = len(data)
    index = 0
    segment=[]
    dicts={}
    flag = 0
    title=""
    # Same segmentation as water_res.
    while index < lens:
        strs = data[index].strip()
        if strs.startswith("Flow Budget for Zone"):
            segment=[]
            flag = 1
            title=strs
        if strs.startswith("Percent Discrepancy"):
            segment.append(strs)
            numbers = reg_find_int(title)
            key = ','.join(numbers)
            dicts[key]=segment
            flag = 0
        if flag ==1 :
            segment.append(strs)
        index=index+1
    # Precipitation: (IN - OUT) RECHARGE lines of zone 1, this period only.
    # Raises KeyError if a key is missing from the listing.
    zone1_rechage = 0
    zone1_keys=[ "1,1,"+str(per+1),"1,2,"+str(per+1),"1,3,"+str(per+1)]
    for key in zone1_keys:
          array = dicts[key]
          temp=[]
          for item in array:
              if item.startswith("RECHARGE") :
                  strs = item.replace(" ", "").replace("RECHARGE=", "")
                  temp.append(float(strs))
          zone1_rechage += (temp[0]-temp[1])
    # Yongding-River seepage: net zone 2 <-> 1 exchange, this period only.
    zone1_well = 0
    zone1_well_keys=["1,1,"+str(per+1),"1,2,"+str(per+1),"1,3,"+str(per+1)]
    for key in zone1_well_keys:
            array = dicts[key]
            indata,outdata= 0,0
            for item in array:
                 if item.startswith("Zone   2 to   1") :
                     strs = item.replace(" ", "").replace("Zone2to1=", "")
                     indata = float(strs)
                 if item.startswith("Zone   1 to   2") :
                     strs = item.replace(" ", "").replace("Zone1to2=", "")
                     outdata = float(strs)
            zone1_well += (indata-outdata)
    # Lateral inflow: net zone 8 <-> 7 exchange, this period only.
    zone7=0
    zone7_keys=["7,1,"+str(per+1),"7,2,"+str(per+1),"7,3,"+str(per+1)]
    for key in zone7_keys:
            array = dicts[key]
            indata,outdata= 0,0
            for item in array:
                 if item.startswith("Zone   8 to   7") :
                     strs = item.replace(" ", "").replace("Zone8to7=", "")
                     indata = float(strs)
                 if item.startswith("Zone   7 to   8") :
                     strs = item.replace(" ", "").replace("Zone7to8=", "")
                     outdata = float(strs)
            zone7 += (indata-outdata)
    # Leakage discharge: net zone 7 <-> 6 exchange, this period only.
    zone6 =0
    zone6_keys=["6,1,"+str(per+1),"6,2,"+str(per+1),"6,3,"+str(per+1)]
    for key in zone6_keys:
            array = dicts[key]
            indata,outdata= 0,0
            for item in array:
                 if item.startswith("Zone   7 to   6") :
                     strs = item.replace(" ", "").replace("Zone7to6=", "")
                     indata = float(strs)
                 if item.startswith("Zone   6 to   7") :
                     strs = item.replace(" ", "").replace("Zone6to7=", "")
                     outdata = float(strs)
            zone6 += (indata-outdata)
    result =[zone1_rechage,zone1_well,zone7,zone6]
    for i in range(len(result)):
        # one step = 10 days; convert to 1e8 m^3
        result[i]= round(result[i]/100000000*10, 4)
    return result
#多模型的水位
def water_depth(model_name):
    """Build all chart series for a comma-separated list of prediction
    scenarios (*model_name*, e.g. "SP1-1,SP2-3").

    Returns a dict with: the month axis, water-level lines for the three
    observation sites (Yiheyuan/yhy, Lianshihu/lsh, Dongbeiwang/dbw) each
    with a flat surface-elevation reference line, baseline + per-scenario
    recharge/river/pumping input lines, and per-zone depth lines (pyq/sq/yqs)
    read from each scenario's head file.
    """
    name_array = model_name.split(",")
    yhyMatrix = np.loadtxt(base.water_yhy_path, dtype=str,encoding='utf-8')
    lshMatrix  = np.loadtxt(base.water_lsh_path, dtype=str,encoding='utf-8')
    dbwMatrix =  np.loadtxt(base.water_dbw_path, dtype=str,encoding='utf-8')
    res ={}
    # Surface elevation reference lines (52/80/49 m) plus one line per scenario.
    yhydata=[]
    base1={"name":"地表高程","data":[52]*12}
    yhydata.append(base1)
    lshdata=[]
    base2={"name":"地表高程","data":[80]*12}
    lshdata.append(base2)
    dbwdata=[]
    base3={"name":"地表高程","data":[49]*12}
    dbwdata.append(base3)
    months = ModelPeriod.get_months_in_range_ym("2023-01","2023-12")
    # Each scenario occupies one column of the site matrices.
    for i in range(len(name_array)):
        if name_array[i] != '':
             index = muiltyModelIndex(name_array[i])
             array1 = get_column(yhyMatrix,index)
             array2 = get_column(lshMatrix,index)
             array3 = get_column(dbwMatrix,index)
             yhydata.append({"name":name_array[i],"data":convertColumnData(array1)})
             lshdata.append({"name":name_array[i],"data":convertColumnData(array2)})
             dbwdata.append({"name":name_array[i],"data":convertColumnData(array3)})
    # Baseline input series from the ini files
    # (NOTE(review): assumes base.prefix ends with a path separator — confirm).
    rchMatrix = np.loadtxt(base.prefix + "base_water.ini", dtype=str,encoding='utf-8')
    riverMatrix = np.loadtxt(base.prefix + "base_river.ini", dtype=str,encoding='utf-8')
    pumpMatrix = np.loadtxt(base.prefix + "base_mining.ini", dtype=str,encoding='utf-8')
    rchdata=[]
    # Row 1 is the rain-type-1 baseline; /9 matches the scaling in rchBaseResult.
    rch_base1 = rchMatrix[1]
    rch_base1_float =[]
    for i in range (0,len(rch_base1)):
         float_data =  round(float(rch_base1[i])/9,2)
         rch_base1_float.append(float_data)
    rchdata.append({"name":"基准值","data":rch_base1_float})
    riverdata=[]
    riverdata.append({"name":"基准值","data":riverMatrix.astype(float).tolist()})
    pumpdata=[]
    pumpX=pumpMatrix[1]
    pump_float=[]
    for i in range (0,len(pumpX)):
         float_data =  round(float(pumpX[i]),2)
         pump_float.append(float_data)
    pumpdata.append({"name":"基准值","data":pump_float})
    res["xAxis"]  = months
    res["yhy_line"]  = yhydata
    res["lsh_line"]  = lshdata
    res["dbw_line"]  = dbwdata
    # Per-scenario input lines (scaled baselines).
    for i in range(len(name_array)):
        if name_array[i] != '':
            rchdata.append(rchBaseResult(rchMatrix,name_array[i]))
            riverdata.append(riverBaseResult(riverMatrix, name_array[i]))
            pumpdata.append(pumpBaseResult(pumpMatrix, name_array[i]))
    res["rch_line"]  = rchdata
    res["river_line"]  = riverdata
    res["pump_line"]  = pumpdata
    # Per-zone depth lines, read from each scenario's MODFLOW head file.
    yqsdata=[]
    pyqdata=[]
    sqdata=[]
    for i in range(len(name_array)):
         if name_array[i] != '':
             paths = base.muiltyModel + name_array[i] + "\\modflow.head"
             resdata = AchiveReport.getXs3LineChart(paths)
             pyqdata.append({"name":name_array[i],"data":resdata["pyq"]})
             sqdata.append({"name":name_array[i],"data":resdata["sq"]})
             yqsdata.append({"name":name_array[i],"data":resdata["yqs"]})
    res["yqs_line"]  = yqsdata
    res["sq_line"]  = sqdata
    res["pyq_line"]  = pyqdata
    return res
#小场景水位 å’Œå˜å¹…
def xs_depth(model_name):
    """Water level ("depth") and monthly change ("bf") for the three
    small-scene zones (plain / mountain / Yuquanshan) of *model_name*,
    read from the model's MODFLOW head file."""
    paths = base.model_dir + model_name + "\\modflow.head"
    if model_name == base.not_allowed_model:
        paths = base.baseModel2 + "\\modflow.head"
    resdata = AchiveReport.getXs3LineChart(paths)

    zones = [("平原区", "pyq"), ("山区", "sq"), ("玉泉山地区", "yqs")]
    depth_lines = [{"name": label, "data": roundArray(resdata[key])}
                   for label, key in zones]
    change_lines = [{"name": label, "data": xs_bf(resdata[key])}
                    for label, key in zones]
    return {"depth": depth_lines, "bf": change_lines}
def xs_bf(array):
    """First differences of *array*, rounded to 2 dp, with a leading 0."""
    diffs = [round(curr - prev, 2) for prev, curr in zip(array, array[1:])]
    return [0] + diffs
def roundArray(array):
    """Return a copy of *array* with every element rounded to 2 dp."""
    return [round(value, 2) for value in array]
# rch baseline data
def rchBaseResult(rchMatrix,sp):
    """Recharge series for prediction scenario *sp*.

    Rows 1/2/3 of *rchMatrix* hold the baselines for rain types 1/2/3; the
    scenario's rain type and rain multiplier come from getmuiltyModelparam.
    Each baseline value is scaled by the multiplier (via muiltyArray) and
    divided by 9, both rounded to 2 dp, exactly as before.

    Improvement: the original repeated the identical scale-and-divide body
    three times, once per rain type; the type now just selects the row.
    Returns {} for an unknown rain type (original behavior preserved).
    """
    trump = getmuiltyModelparam(sp)
    types = trump[0]
    rch_x = trump[1]
    if types not in (1, 2, 3):
        return {}
    # row index equals the rain type
    temp = muiltyArray(rchMatrix[types], rch_x)
    data = [round(v / 9, 2) for v in temp]
    return {"name": sp, "data": data}
# river baseline data
def riverBaseResult(riverMatrix,sp):
    """River series for scenario *sp*: the baseline scaled by the scenario's
    river multiplier (element 2 of the scenario tuple)."""
    river_scale = getmuiltyModelparam(sp)[2]
    return {"name": sp, "data": muiltyArray(riverMatrix, river_scale)}
#抽水井基准数据
def pumpBaseResult(pumpMatrix,sp):
    """Pumping series for scenario *sp*: row 1 of the baseline matrix scaled
    by the scenario's pumping multiplier (element 3 of the scenario tuple)."""
    pump_scale = getmuiltyModelparam(sp)[3]
    return {"name": sp, "data": muiltyArray(pumpMatrix[1], pump_scale)}
def muiltyArray(array,scale):
    """Multiply every element (parsed as float) by *scale*, rounded to 2 dp."""
    return [round(float(item) * scale, 2) for item in array]
def convertColumnData(array):
    """Drop the header cell, parse the remaining cells as 2-dp floats
    (non-numeric cells become 0 via transToNum), and keep every third value."""
    values = [transToNum(item) for item in array[1:]]
    return values[::3]
def transToNum(str):
    """Parse *str* as a float rounded to 2 dp; return 0 for non-numeric input.

    Bug fix: the original wrote ``except ValueError()`` — an exception
    *instance*, not a class — which raises ``TypeError`` at handling time
    instead of returning 0.  TypeError is also caught so that non-string,
    non-numeric inputs (e.g. None) fall back to 0 as well.
    """
    try:
        return round(float(str), 2)
    except (TypeError, ValueError):
        return 0
 # index of a prediction scenario; data is read column-wise from the ini files by this index
def muiltyModelIndex(name):
    """Column index of prediction scenario *name* in the scenario list
    (raises ValueError when the name is unknown, as list.index does)."""
    return muiltyModelList().index(name)
#预测模型列表
def  muiltyModelList():
    """Ordered names of the predefined prediction-scenario models; the
    position of a name doubles as its column index in the data matrices."""
    return [
        "SP0-0",
        "SP1-1", "SP1-2", "SP1-3",
        "SP2-1", "SP2-2", "SP2-3",
        "SP3-1", "SP3-2", "SP3-4", "SP3-5", "SP3-6", "SP3-7",
        "SP4-1", "SP4-7",
    ]
#Np获取某一列的值
def get_column(matrix, column_number):
    """Return column *column_number* of a 2-D numpy array as a 1-D view."""
    return matrix[:, column_number]
#元组中 1=降雨类型  2=倍数 3,河流倍数 4,抽水井倍数
def getmuiltyModelparam(sp):
    """Parameter tuple for scenario *sp*:
    (rain type, rain multiplier, river multiplier, pumping multiplier).
    Raises KeyError for an unknown scenario name."""
    scenario_params = {
        "SP0-0": (1, 1, 1, 1),
        # rain-type variations
        "SP1-1": (2, 1, 1, 1),
        "SP1-2": (3, 1, 1, 1),
        "SP1-3": (3, 1.2, 1, 1),
        # river-multiplier variations
        "SP2-1": (1, 1, 2, 1),
        "SP2-2": (1, 1, 5, 1),
        "SP2-3": (1, 1, 10, 1),
        # pumping-multiplier variations
        "SP3-1": (1, 1, 1, 0.25),
        "SP3-2": (1, 1, 1, 0.5),
        "SP3-4": (1, 1, 1, 0),
        "SP3-5": (1, 1, 1, 0.4),
        "SP3-6": (1, 1, 1, 0.3),
        "SP3-7": (1, 1, 1, 0.6),
        # combined variations
        "SP4-1": (1, 1, 2, 0.5),
        "SP4-7": (3, 1.2, 10, 0),
    }
    return scenario_params[sp]
DataTask.py
New file
@@ -0,0 +1,325 @@
import cx_Oracle
import pymssql
from datetime import datetime, timedelta
import numpy as np
# select * from swzdh.rain;
# select * from swzdh.river;
# select * from swzdh.gw;
#麻峪雨量站
mayu_rainfall="30747850"
#陇驾庄水文站
ljz_swz="30700450"
#LQWB(龙泉)/ZSSC(再生水厂)/WTY(梧桐苑)/LSH(莲石湖)/HZZ(侯庄子)/GC(古城)
arr1=["30773702","30769360","30769280","30567304","30769320","30567303"]
haidian_rainfall="xxxx"
#SXC(双兴村93)/XM1(颐和园西门1)/XM2(颐和园西门2)/SYSC(水源三场336)/SJYY(四季御园2)/BW(北坞)
arr2=["30566324","30565080","30564400","30566335","30564840","30564880"]
#获取永定河15天的预测需要的数据
def get_ydh15_real_data():
    """Assemble the observed-data matrix for the 15-day Yongding-River
    prediction: a 75-day date axis (60 days of history + 15 forecast days),
    Mayu rainfall, Longjiazhuang river stage, and the six groundwater wells
    in ``arr1``.  Days with no observation are filled with 0.  Returns the
    transposed numpy array (one row per day).

    Bug fixes vs. the original:
    - ``for i in (len(arr1))`` iterated an int (TypeError) -> ``for stcd in arr1``;
    - the date *string* was appended where the observed *value* belonged;
    - dict membership was tested against the row list instead of the dict.
    """
    current_time = datetime.now()
    start_time = current_time - timedelta(days=60)
    # 75-entry date axis: history plus forecast horizon
    times = [(start_time + timedelta(days=i)).strftime("%Y-%m-%d")
             for i in range(75)]
    start_str = start_time.strftime("%Y-%m-%d")
    end_str = current_time.strftime("%Y-%m-%d")

    def _by_day(rows):
        # first observation wins when a day appears twice (original behavior)
        d = {}
        for row in rows:
            d.setdefault(row["time"], row["value"])
        return d

    def _series(value_by_day):
        # one value per axis day, 0 where no observation exists
        return [value_by_day.get(day, 0) for day in times]

    result = [times]
    # Mayu rainfall station
    result.append(_series(_by_day(get_data("rain", mayu_rainfall, start_str, end_str))))
    # Longjiazhuang hydrologic station
    result.append(_series(_by_day(get_data("river", ljz_swz, start_str, end_str))))
    # groundwater monitoring wells
    for stcd in arr1:
        result.append(_series(_by_day(get_data("gw", stcd, start_str, end_str))))
    # transpose so each row is one day
    return np.asarray(result).T
#获取玉泉山15天的预测需要的数据
def getyqs15_real_data():
    """Assemble the observed-data matrix for the 15-day Yuquanshan
    prediction: a 75-day date axis (60 days of history + 15 forecast days),
    Haidian rainfall, Longjiazhuang river stage, and the six groundwater
    wells in ``arr2``.  Days with no observation are filled with 0.
    Returns the transposed numpy array (one row per day).

    Bug fixes vs. the original (same as get_ydh15_real_data):
    - ``for i in (len(arr2))`` iterated an int (TypeError) -> ``for stcd in arr2``;
    - the date *string* was appended where the observed *value* belonged;
    - dict membership was tested against the row list instead of the dict.
    """
    current_time = datetime.now()
    start_time = current_time - timedelta(days=60)
    # 75-entry date axis: history plus forecast horizon
    times = [(start_time + timedelta(days=i)).strftime("%Y-%m-%d")
             for i in range(75)]
    start_str = start_time.strftime("%Y-%m-%d")
    end_str = current_time.strftime("%Y-%m-%d")

    def _by_day(rows):
        # first observation wins when a day appears twice (original behavior)
        d = {}
        for row in rows:
            d.setdefault(row["time"], row["value"])
        return d

    def _series(value_by_day):
        # one value per axis day, 0 where no observation exists
        return [value_by_day.get(day, 0) for day in times]

    result = [times]
    # Haidian rainfall station
    result.append(_series(_by_day(get_data("rain", haidian_rainfall, start_str, end_str))))
    # Longjiazhuang hydrologic station
    result.append(_series(_by_day(get_data("river", ljz_swz, start_str, end_str))))
    # groundwater monitoring wells
    for stcd in arr2:
        result.append(_series(_by_day(get_data("gw", stcd, start_str, end_str))))
    # transpose so each row is one day
    return np.asarray(result).T
####################################################
def get_data(types,num,start_time,end_time):
    """Dispatch to the station-type specific query ('river'/'rain'/'gw');
    returns None for an unknown type (original behavior)."""
    handlers = {'river': river_list, 'rain': rain_list, 'gw': gw_list}
    handler = handlers.get(types)
    if handler is None:
        return None
    return handler(start_time, end_time, num)
# river hydrologic station
def river_list(start_time,end_time,STCD):
    """Query daily river-stage rows for station *STCD* between two
    'YYYY-MM-DD' date strings (inclusive), ordered by time ascending.

    Returns a list of {"time", "value", "stname"} dicts; [] on any error.
    NOTE(review): connection credentials are hard-coded below — consider
    moving them to configuration.
    """
    # Connect to the Oracle hydrology database
    connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh')
    # Create the cursor
    cursor = connection.cursor()
    sql = """
    SELECT * FROM swzdh.river
    WHERE STCD = :STCD and tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD') AND TO_DATE(:end_time, 'YYYY-MM-DD')  order by tm ASC
    """
    res =[]
    try:
        cursor.execute(sql, {'start_time': start_time, 'end_time': end_time,'STCD':STCD})
        column_names = [row[0] for row in cursor.description]
        print("字段名:", column_names)
        # Fetch the whole result set
        result = cursor.fetchall()
        # row[1] = station name, row[2] = timestamp, row[4] = stage value
        # (positions taken from the original code — confirm against the
        # swzdh.river schema)
        for row in result:
            times = row[2]
            date_str = times.strftime("%Y-%m-%d")
            dicts={"time":date_str,"value":row[4],"stname":row[1]}
            res.append(dicts)
    except Exception as e:
            print("Error occurred: ", str(e))
    finally:
    # Close cursor and connection
        cursor.close()
        connection.close()
    return res
   #降雨量
def rain_list(start_time,end_time,STCD):
    """Query rainfall rows for station *STCD* between two 'YYYY-MM-DD' date
    strings and aggregate them into one total per day.

    Returns a list of {"time", "value", "stname"} dicts in first-seen day
    order; [] on any error.
    NOTE(review): connection credentials are hard-coded below — consider
    moving them to configuration.
    """
    # Connect to the Oracle hydrology database
    connection = cx_Oracle.connect('mzy/mzy_^22dPoO0@192.168.44.8:1521/swzdh')
    # Create the cursor
    cursor = connection.cursor()
    sql = """
    SELECT * FROM swzdh.rain
    WHERE STCD = :STCD and tm BETWEEN TO_DATE(:start_time, 'YYYY-MM-DD') AND TO_DATE(:end_time, 'YYYY-MM-DD')  order by tm ASC
    """
    res =[]
    try:
        cursor.execute(sql, {'start_time': start_time, 'end_time': end_time,'STCD':STCD})
        column_names = [row[0] for row in cursor.description]
        print("字段名:", column_names)
        # Fetch the whole result set
        result = cursor.fetchall()
        print(result)
        date_str_arr=[]
        date_str_dict={}
        # Sum the rainfall readings (row[3]) per calendar day;
        # date_str_arr preserves first-seen day order.
        # row[1] = station name, row[2] = timestamp (positions taken from the
        # original code — confirm against the swzdh.rain schema)
        name =""
        for row in result:
            name=  row[1]
            times = row[2]
            date_str = times.strftime("%Y-%m-%d")
            R =  row[3]
            if date_str in date_str_dict:
                date_str_dict[date_str] = date_str_dict[date_str] + R
            else:
                date_str_dict[date_str] = R
                date_str_arr.append(date_str)
        for item in date_str_arr:
           value = round(date_str_dict[item],2)
           temp ={"time":item,"value":value,"stname":name}
           res.append(temp)
    except Exception as e:
            print("Error occurred: ", str(e))
    finally:
    # Close cursor and connection
        cursor.close()
        connection.close()
    return res
# groundwater monitoring wells — two tables/views: V_WT_YRS, V_Z_YRS
def gw_list(start_time,end_time,STCD):
    """Query groundwater-level rows for station *STCD* in
    [start_time, end_time] (both 'YYYY-MM-DD' strings), ordered ascending.

    Returns a list of {"time", "value", "bd"} dicts; [] on any error.

    Security fix: the query is now parameterized instead of being built by
    string concatenation, so STCD/date values can no longer break or inject
    into the SQL.
    NOTE(review): credentials are hard-coded — consider configuration.
    """
    conn = pymssql.connect(server='192.168.44.66',
                           user='xsyrs',
                           password='gws@xsyrs2024',
                           database='DB_DXS',
                           as_dict=True)
    cursor = conn.cursor()
    res = []
    try:
        sql = ("SELECT * FROM V_Z_YRS WHERE STCD = %s AND TM >= %s "
               "AND TM <= %s ORDER BY TM ASC")
        cursor.execute(sql, (STCD, start_time, end_time))
        result = cursor.fetchall()
        for row in result:
            # NOTE(review): the timestamp is read from column "TS" although
            # the filter column is TM — kept from the original; confirm.
            times = row["TS"]
            date_str = times.strftime("%Y-%m-%d")
            value = float(row["Z"])    # water level
            bd = float(row["BD"])      # presumably buried depth — confirm
            res.append({"time": date_str, "value": value, "bd": bd})
    except Exception as e:
        print("Error occurred:", str(e))
    finally:
        cursor.close()
        conn.close()
    return res
DataTransf.py
New file
@@ -0,0 +1,526 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 14:44:36 2024
@author: ZMK
"""
import numpy as np
import flopy
from openpyxl import load_workbook
import os
import Base as base
import CalHead
#河流的分段下标
riv_seg={0:[454,479],1:[480,505],2:[506,527],3:[528,562]}
riv_seg_celles=[26,26,22,35]
#边界的分段下标
ibound_seg={0:[1,86],1:[87,111],2:[112,142],3:[143,170],4:[171,240],
             5:[241,282],6:[283,354],7:[355,393],8:[394,436],9:[437,453]}
def base_excel(model_name,types):
    """Dispatch an Excel import for *model_name* by data category
    (降雨/河流/开采量/边界); returns [] for an unknown category."""
    handlers = {
        '降雨': rch_excel,
        '河流': river_excel,
        '开采量': well_excel,
        '边界': ibound_excel,
    }
    handler = handlers.get(types)
    return handler(model_name) if handler else []
#降雨cxcel
def rch_excel(model_name):
    """Load 降雨.xlsx for *model_name*, apply it to the model via
    rch_analysis, and return the raw sheet as a 2-D list of cell values;
    [] when the file does not exist."""
    paths = base.model_dir + model_name + "\\降雨.xlsx"
    if not os.path.exists(paths):
        return []
    wb = load_workbook(filename=paths)
    ws = wb[wb.sheetnames[0]]
    data = [[cell.value for cell in row] for row in ws.iter_rows()]
    wb.close()
    # analyse the rainfall sheet and write it into the model
    rch_analysis(data, model_name)
    return data
def rch_analysis(data_array,model_name):
    """Convert the 降雨.xlsx sheet into per-zone recharge rates and write
    them into the model via rch_update.

    Sheet layout (0-based): rows 1-16 are the 16 recharge zones;
    column 2 = zone fid, column 3 = infiltration coefficient,
    columns 4..4+nper = one rainfall value per stress period.
    """
    periods =CalHead.get_model_period(model_name)
    # number of stress periods
    periods_len= len(periods)
    array =  np.asarray(data_array)
    fid= array[1:17,2]
    params =  array[1:17,3]
    float_params = np.asarray(params,dtype=float)
    # rows 1-16, columns 4..col_last: rainfall per stress period
    col_last = 4 + periods_len
    data = array[1:17,4:col_last]
    float_data = np.asarray(data,dtype=float)
    for i in range(0,len(float_data)):
        for j in range(0,len(float_data[i])):
            # NOTE(review): /30/100 presumably converts the monthly depth to
            # a daily rate in model length units — confirm the unit convention
            float_data[i][j] =  float_data[i][j] * float_params[i] /30/100
    rch_update(float_data,fid,model_name,periods_len)
#更新模型rch数据
def rch_update(float_data,fids,model_name,periods_len):
    """Write per-zone recharge rates into the model's RCH package.

    float_data: (16, nper) array of rates; fids: zone ids whose cell lists
    come from base.getAreas().  Protected/archived models are refused.
    """
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return "本模型为验证模型,不允许修改!"
    model_ws = base.model_dir + model_name
    mx = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
    areas= base.getAreas()
    for per in range(periods_len):
        # recharge array for one stress period
        item = mx.rch.rech.__getitem__(kper=per)
        array2d = item.get_value()
        per_data = float_data[:,per]
        print(per_data)
        for i in range(0,len(per_data)):
            data =round(float(per_data[i]),8)
            fid = fids[i]
            tuples = areas[fid]
            # stamp the zone's rate onto every (row, col) cell of the zone
            for entity in tuples:
                    x = entity[0]
                    y = entity[1]
                    array2d[x][y]= data
        mx.rch.rech.__setitem__(key=per, value=array2d)
    # rebuild the RCH package with the edited arrays and write it to disk
    rch = flopy.modflow.ModflowRch(mx,nrchop=mx.rch.nrchop,
                                        ipakcb=mx.rch.ipakcb,
                                        rech=mx.rch.rech,
                                        irch =mx.rch.irch)
    rch.write_file(check=False)
#河流数据解析
def river_excel(model_name):
    """Load 河流.xlsx for *model_name*, apply it to the model via
    river_analysis, and return the data rows with the header row dropped;
    [] when the file does not exist."""
    paths = base.model_dir + model_name + "\\河流.xlsx"
    if not os.path.exists(paths):
        return []
    wb = load_workbook(filename=paths)
    ws = wb[wb.sheetnames[0]]
    data = [[cell.value for cell in row] for row in ws.iter_rows()]
    wb.close()
    # drop the header row, then apply the river sheet to the model
    result = data[1:]
    river_analysis(result, model_name)
    return result
#分析河流数据
def river_analysis(data_array,model_name):
    """Convert the 河流.xlsx sheet into per-cell rates for the four river
    segments and apply them via river_update.

    NOTE(review): merge_array concatenates columns [1:2] and [1:4], so
    column 1 appears twice and column 4 is unused, while the coefficients
    span columns 5:9 — confirm this matches the spreadsheet layout.
    """
    periods =CalHead.get_model_period(model_name)
    periods_len= len(periods)
    row_last= 1 + periods_len
    array =  np.asarray(data_array)
    data = array[1:row_last,1:2]
    data2 =  array[1:row_last,1:4]
    merge_array = np.concatenate((data,data2),axis=1)
    params =  array[1:row_last,5:9]
    float_data = np.asarray(merge_array,dtype=float)
    float_params = np.asarray(params,dtype=float)
    for i in range(0,len(float_data)):
        for j in range(0,len(float_data[i])):
              # monthly volume (x30 days x86400 s), spread over the segment's
              # cell count and converted back to a per-day rate
              temp = round(float_data[i][j] * float_params[i][j] *30*86400 ,4)
              last = round(temp/riv_seg_celles[j]/30,4)
              float_data[i][j]=last
    print(float_data)
    river_update(float_data,model_name,periods_len)
#更新河流数据
def river_update(float_data,model_name,periods_len):
    """Write per-segment river recharge rates into the model's WEL package.

    float_data: rows = stress periods, columns = the 4 river segments.
    Returns an error string (and changes nothing) for read-only models.
    """
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return "本模型为验证模型,不允许修改!"
    model_ws = base.model_dir + model_name
    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
    # Rebuild the WEL stress-period data, shaped like:
    #     lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
    lrcq = {}
    for per in range(periods_len):
        wel = []
        wel = ml.wel.stress_period_data.__getitem__(kper=per)
        # Per-segment rates for this stress period.
        segment0 = float_data[per][0]
        segment1 = float_data[per][1]
        segment2 = float_data[per][2]
        segment3 = float_data[per][3]
        # Rows of [layer, row, column, rate] for this stress period.
        array2d = []
        count = 1
        # NOTE(review): riv_seg maps 1-based running WEL-entry index ranges to
        # river segments — confirm against the module-level definition.
        for Layer, Row, Column, Q in wel:
            array = []
            if count>= riv_seg[0][0] and count <= riv_seg[0][1]:
                 array = [Layer, Row, Column, segment0]
            elif  count>= riv_seg[1][0] and count <= riv_seg[1][1]:
                 array = [Layer, Row, Column, segment1]
            elif  count>= riv_seg[2][0] and count <= riv_seg[2][1]:
                array = [Layer, Row, Column, segment2]
            elif  count>= riv_seg[3][0] and count <= riv_seg[3][1]:
                     array = [Layer, Row, Column, segment3]
            else:
                 # Not a river cell: keep the original rate.
                 array = [Layer, Row, Column, Q]
            array2d.append(array)
            count +=1
        lrcq[per] = array2d
    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
                                  dtype=ml.wel.dtype,
                                  options=ml.wel.options,
                                  stress_period_data=lrcq)
    ml.write_input()
    #解析开采量的数据
def well_excel(model_name):
    """Read the pumping workbook (开采量.xlsx), drop the header row, run
    well_analysis on the remaining rows and return them.

    Returns an empty list when the workbook does not exist.
    """
    xlsx_path = base.model_dir + model_name + "\\开采量.xlsx"
    if not os.path.exists(xlsx_path):
        return []
    workbook = load_workbook(filename=xlsx_path)
    sheet = workbook[workbook.sheetnames[0]]
    rows = [[cell.value for cell in row] for row in sheet.iter_rows()]
    workbook.close()
    # Skip the header (first) row before analysis.
    result = rows[1:]
    well_analysis(result, model_name)
    return result
#开采量的分析
def well_analysis(data_array,model_name):
    """Expand district-level monthly pumping volumes to per-well rates and
    write them into the model's WEL package.

    Rate formula: district volume * street share * 10000 / (street wells * 30).
    """
    periods = CalHead.get_model_period(model_name)
    periods_len = len(periods)
    raw = np.asarray(data_array)
    # Columns 1-4: monthly volumes of the four districts; skip the header row.
    district = np.asarray(raw[1:1 + periods_len, 1:5], dtype=float).transpose()
    # Repeat each district series once per well in its group (50/49/18/12
    # wells), plus one trailing copy of district 0 — the order must match the
    # well order in the WEL file.
    repeats = [(0, 50), (1, 49), (2, 18), (3, 12), (0, 1)]
    stacked = []
    for col, times in repeats:
        stacked.extend([district[col]] * times)
    rates = np.asarray(stacked, dtype=float)
    # Per-well scaling table: column 0 = street share, column 1 = well count.
    well_scale = np.loadtxt(base.well_scale_path, dtype=str)
    scale = np.asarray(well_scale, dtype=float)
    for i in range(len(rates)):
        for j in range(len(rates[i])):
            rates[i][j] = round(rates[i][j] * scale[i][0] * 10000 / (scale[i][1] * 30), 4)
    well_update(rates, model_name, periods_len)
#更新开采量
def well_update(np_data,model_name,periods_len):
    """Write per-well pumping rates into the model's WEL package.

    np_data: rows = wells (file order), columns = stress periods.
    Returns an error string (and changes nothing) for read-only models.
    """
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return "本模型为验证模型,不允许修改!"
    model_ws = base.model_dir + model_name
    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
    # Rebuild the WEL stress-period data, shaped like:
    #     lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
    lrcq = {}
    for per in range(periods_len):
        wel = []
        wel = ml.wel.stress_period_data.__getitem__(kper=per)
        per_data = np_data[:,per]
        # Rows of [layer, row, column, rate] for this stress period.
        array2d = []
        # The first 562 entries of the WEL file are not pumping wells:
        # keep them unchanged (layout: wel[i] = [Layer, Row, Column, Q]).
        for i in range(0,562):
             array = [wel[i][0],wel[i][1], wel[i][2],wel[i][3]]
             array2d.append(array)
        # Remaining entries are wells; replace Q with the computed rate.
        for i in range(562,len(wel)):
            indexid = i-562
            update_data=per_data[indexid]
            array = [wel[i][0],wel[i][1], wel[i][2],update_data]
            array2d.append(array)
        lrcq[per] = array2d
    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,dtype=ml.wel.dtype,
                                  options=ml.wel.options,
                                  stress_period_data=lrcq)
    ml.write_input()
  #解析边界的数据
def ibound_excel(model_name):
    """Read the boundary workbook (边界.xlsx), run analysis + model update,
    and return the table to display in the UI.

    Returns an empty list when the workbook does not exist.
    """
    xlsx_path = base.model_dir + model_name + "\\边界.xlsx"
    if not os.path.exists(xlsx_path):
        return []
    workbook = load_workbook(filename=xlsx_path)
    sheet = workbook[workbook.sheetnames[0]]
    rows = [[cell.value for cell in row] for row in sheet.iter_rows()]
    workbook.close()
    # Skip the header (first) row before analysis.
    result = rows[1:]
    np_array = ibound_analysis(result, model_name)
    # Table rendered on the page.
    view_data = ibound_view_data(np_array)
    periods = CalHead.get_model_period(model_name)
    ibound_update(np_array, model_name, len(periods))
    return view_data
#更新边界
def ibound_update(np_array,model_name,periods_len):
    """Write the ten boundary inflow/outflow series into the WEL package.

    np_array: rows = the 10 boundary segments, columns = stress periods.
    Returns an error string (and changes nothing) for read-only models.

    The original ten-branch elif chain is replaced by a loop over the
    module-level ibound_seg table (1-based [start, end] entry ranges);
    segments are checked in the same order, first match wins, so behavior
    is unchanged.
    """
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return "本模型为验证模型,不允许修改!"
    model_ws = base.model_dir + model_name
    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws,  exe_name="mf2005", verbose=True,  version="mf2005", check=False)
    # Rebuild the WEL stress-period data, shaped like:
    #     lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
    lrcq = {}
    for per in range(periods_len):
        wel = ml.wel.stress_period_data.__getitem__(kper=per)
        per_data = np_array[:,per]
        # Rows of [layer, row, column, rate] for this stress period.
        array2d = []
        count = 1
        for Layer, Row, Column, Q in wel:
            # Default: not a boundary cell, keep the original rate.
            value = Q
            # Map the running 1-based entry index onto one of the 10 segments.
            for seg_idx in range(10):
                if ibound_seg[seg_idx][0] <= count <= ibound_seg[seg_idx][1]:
                    value = per_data[seg_idx]
                    break
            array2d.append([Layer, Row, Column, value])
            count += 1
        lrcq[per] = array2d
    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
                                  dtype=ml.wel.dtype,
                                  options=ml.wel.options,
                                  stress_period_data=lrcq)
    ml.write_input()
#边界的分析
def ibound_analysis(data_array,model_name):
    """Aggregate district rainfall columns into the ten boundary series.

    data_array rows 1-9 / cols 2-13 hold the district monthly values
    (Datai, Nanxinfang, Shangweidian, Sanjiadian, Mayu, Wenquan, ...).
    model_name is unused but kept for interface compatibility.
    Returns a (10, 12) float array rounded to 2 decimals.
    """
    raw = np.asarray(data_array)
    district = np.asarray(raw[1:10, 2:14], dtype=float)
    datai = district[0]
    # Inflow 1-1: first six districts combined.
    west_in = district[[0, 1, 2, 3, 4, 5]].sum(axis=0)
    # Outflow 1-1: Datai + Nanxinfang (negated below).
    south_out = district[[0, 1]].sum(axis=0)
    # Shared by inflows 3-2 .. 3-5: Datai + Nanxinfang + Shangweidian.
    mountain_in = district[[0, 1, 2]].sum(axis=0)
    series = [
        west_in,        # 流入1-1
        district[2],    # 流入1-2 (Shangweidian)
        datai,          # 流入1-3
        -south_out,     # 流出1-1
        -datai,         # 流出1-2
        datai,          # 流入3-1
        mountain_in,    # 流入3-2
        mountain_in,    # 流入3-3
        mountain_in,    # 流入3-4
        mountain_in,    # 流入3-5
    ]
    return np.around(np.asarray(series, dtype=float), decimals=2)
def ibound_view_data(np_data):
    """Build the UI table: one row per boundary with its rainfall total,
    coefficient and derived input value, all stringified.

    np_data: (10, n_periods) array from ibound_analysis.
    Returns a list of string rows, headed by the column titles.
    """
    names = ['流入1-1','流入1-2','流入1-3','流出1-1','流出1-2',
             '流入3-1','流入3-2','流入3-3','流入3-4','流入3-5']
    coeffs = [1, 1, 1, 2.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5]
    cell_counts = [86, 47, 31, 28, 50, 42, 72, 39, 43, 17]
    periods, days = 12, 30
    totals = np.around(np.sum(np_data, axis=1), 2)
    # Input value = rainfall total * coefficient * cells * periods * days.
    inputs = [round(totals[i] * coeffs[i] * cell_counts[i] * periods * days, 2)
              for i in range(len(names))]
    table = np.asarray([names, totals, coeffs, inputs], dtype=str).transpose()
    result = [['边界', '降雨量', '系数', '输入值']]
    result.extend(table.tolist())
    return result
GRU_zmk.py
New file
@@ -0,0 +1,275 @@
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 15:33:12 2024
@author: BDYGS
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 05:28:13 2024
@author: BDYGS
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Apr  6 04:01:04 2024
@author: BDYGS
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from torch.utils.data import TensorDataset
from tqdm import tqdm
class Config():
    """Hyper-parameters and data location for the GRU training script."""
    data_path = "C:\\Users\\ZMK\\Desktop\\GRU\玉泉山井.csv"  # training CSV (date-indexed well data)
    timestep = 60  # sliding-window length: time steps fed to the GRU
    batch_size = 30  # mini-batch size
    feature_size = 8  # number of input features per time step
    hidden_size = 256  # GRU hidden state size
    output_size = 15  # forecast horizon: steps predicted per head
    num_layers = 2  # number of stacked GRU layers
    epochs = 100 # training epochs
    best_loss = 0 # best-loss record
    learning_rate = 0.0003 # AdamW learning rate
# Single shared configuration instance for this script.
config = Config()
def normalization(data,label):
    """Min-max scale features and labels independently.

    Returns the scaled arrays plus the fitted label scaler, which is needed
    later to inverse-transform predictions back to physical units.
    """
    feature_scaler = MinMaxScaler()
    label_scaler = MinMaxScaler()
    scaled_data = feature_scaler.fit_transform(data)
    scaled_label = label_scaler.fit_transform(label)
    return scaled_data, scaled_label, label_scaler
def split_windows(data,seq_len,output_size):
    """Slide a window of length seq_len over data one step at a time.

    Each sample pairs the window (all columns) with the next output_size
    rows restricted to columns 2+ (the label columns).
    Returns (x, y) as numpy arrays.
    """
    windows, targets = [], []
    n_windows = len(data) - seq_len - 1 - output_size
    for start in range(n_windows):
        stop = start + seq_len
        windows.append(data[start:stop, :])
        targets.append(data[stop:stop + output_size, 2:])
    print('split_windows_i:', start)
    print(windows[-1].shape, targets[-1].shape)
    x, y = np.array(windows), np.array(targets)
    print('x.shape,y.shape=\n', x.shape, y.shape)
    return x, y
def split_data(x,y,split_ratio):
    """Split windowed samples into train/test tensors by ratio.

    Also returns the full dataset as tensors so the whole series can be
    re-evaluated after training.
    """
    train_size = int(len(y) * split_ratio)

    def to_tensor(a):
        return Variable(torch.Tensor(np.array(a)))

    x_data = to_tensor(x)
    y_data = to_tensor(y)
    x_train = to_tensor(x[:train_size])
    y_train = to_tensor(y[:train_size])
    x_test = to_tensor(x[train_size:])
    y_test = to_tensor(y[train_size:])
    print('x_data.shape,y_data.shape,x_train.shape,y_train.shape,x_test.shape,y_test.shape:\n{}{}{}{}{}{}'
    .format(x_data.shape,y_data.shape,x_train.shape,y_train.shape,x_test.shape,y_test.shape))
    return x_data, y_data, x_train, y_train, x_test, y_test
def nash_sutcliffe_efficiency(y_true, y_pred):
    """Return the Nash-Sutcliffe Efficiency.

    1.0 means a perfect fit; 0.0 means no better than predicting the
    observed mean; negative values are worse than the mean predictor.
    """
    residual_ss = np.sum((y_true - y_pred) ** 2)
    variance_ss = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1 - residual_ss / variance_ss
# 1. Load the time-series data.
df= pd.read_csv(config.data_path,parse_dates=["date"],index_col=[0])
# parse_dates makes the "date" column the (datetime) index.
print(df.shape)
data = df.iloc[:,0:8]  # first 8 columns are the input features
label = df.iloc[:,7]  # column 7 doubles as the label source
data = data.values
label = label.values.reshape(-1,1)
# torch.manual_seed(7)  # fix the RNG seed for reproducibility
data,label,mm_y=normalization(data,label)
dataX,dataY = split_windows(data,config.timestep,config.output_size)
x_data,y_data,x_train,y_train,x_test,y_test = split_data(dataX,dataY,0.8)
# 5. Build the train/test datasets.
train_data = TensorDataset(x_train,y_train)
test_data = TensorDataset(x_test,y_test)
# 6. Wrap the datasets in (non-shuffling) loaders.
train_loader = torch.utils.data.DataLoader(train_data,
                                           config.batch_size,
                                           False)
test_loader = torch.utils.data.DataLoader(test_data,
                                          config.batch_size,
                                          False)
# 7.定义GRU网络
class GRU(nn.Module):
    """Multi-head GRU: one shared recurrent stack followed by six linear
    heads, each predicting output_size future steps for one well."""

    def __init__(self, feature_size, hidden_size, num_layers, output_size):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        # Shared recurrent encoder over the input window.
        self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8, batch_first=True)
        # One output head per monitored well. Attribute names (fc1..fc6) and
        # creation order are kept for checkpoint compatibility.
        self.fc1 = nn.Linear(self.hidden_size, self.output_size)
        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
        self.fc3 = nn.Linear(self.hidden_size, self.output_size)
        self.fc4 = nn.Linear(self.hidden_size, self.output_size)
        self.fc5 = nn.Linear(self.hidden_size, self.output_size)
        self.fc6 = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, x, hidden=None):
        batch_size = x.size()[0]
        # Zero initial hidden state unless one is supplied.
        if hidden is None:
            state = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
        else:
            state = hidden
        output, state = self.gru(x, state)
        # Each head reads the encoder output; only the last time step is kept.
        heads = (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5, self.fc6)
        per_well = [head(output)[:, -1, :] for head in heads]
        # Shape: (batch, output_size, 6 wells).
        return torch.stack(per_well, dim=2)
model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size)  # build the GRU network
print(model)
loss_function = nn.MSELoss()  # loss function
optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)  # optimizer
# 8. Model training.
for epoch in range(config.epochs):
    model.train()
    running_loss = 0
    train_bar = tqdm(train_loader)  # progress bar
    for data in train_bar:
        x_train, y_train = data  # unpack X and Y from the loader
        optimizer.zero_grad()
        y_train_pred = model(x_train)
        loss = loss_function(y_train_pred, y_train)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        train_bar.desc = "train epoch[{}/{}] loss:{:.6f}".format(epoch + 1,
                                                                 config.epochs,
                                                                 loss)
print('Finished Training')
model_name = 'GRU_YQS'
torch.save(model.state_dict(), 'C://Users//ZMK//Desktop//GRU/{}.pth'.format(model_name))
model.eval()
# Validation: run the trained model once more over the original data.
df= pd.read_csv(config.data_path,parse_dates=["date"],index_col=[0])
# parse_dates makes the "date" column the (datetime) index.
data = df.iloc[:,0:8]  # feature columns
label = df.iloc[:,7]
data = data.values
label = label.values.reshape(-1,1)
data,label,mm_y=normalization(data,label)
dataX,dataY = split_windows(data,config.timestep,config.output_size)
x_data,y_data,x_train,y_train,x_test,y_test = split_data(dataX,dataY,0.8)
test_pre = model(x_data)
with pd.ExcelWriter("C:\\Users\\ZMK\\Desktop\\GRU\GRU-YQS.xlsx", engine='openpyxl') as writer:
    for i in range(6):
        # Head i, first forecast step, as a column vector.
        test_pre_data = test_pre[:,0,i].data.numpy().reshape(-1,1)
        y_test_pre = y_data[:,0,i].data.numpy().reshape(-1,1)
        print(test_pre_data.shape)
        test_pre_data_inv = mm_y.inverse_transform(test_pre_data)
        # print(test_pre_data_inv.shape)
        y_test_inv =mm_y.inverse_transform(y_test_pre)
        plt.figure(figsize=(10,5))
        plt.plot(y_test_inv)
        plt.plot(test_pre_data_inv)
        plt.legend(('real', 'predict'),fontsize='15')
        plt.show()
        print('MAE/RMSE/NSE')
        print(mean_absolute_error(y_test_inv, test_pre_data_inv))
        print(np.sqrt(mean_squared_error(y_test_inv, test_pre_data_inv)))
        print(nash_sutcliffe_efficiency(y_test_inv, test_pre_data_inv))
        y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}'])
        test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'test Node {i+1}'])
        # Save each node's series to its own worksheet.
        # NOTE(review): predictions are written to the 'True Node' sheet and
        # truths to 'test Node' — the sheet names look swapped; confirm
        # before relying on them downstream.
        test_pre_data_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False)
        y_test_inv.to_excel(writer, sheet_name=f'test Node {i+1}', index=False)
GRU_zmk_pre - 副本.py
New file
@@ -0,0 +1,215 @@
# -*- coding: utf-8 -*-
"""
Created on Sun May 26 02:15:11 2024
@author: BDYGS
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
# import tushare as ts
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from torch.utils.data import TensorDataset
from tqdm import tqdm
class Config():
    """Hyper-parameters for the GRU prediction script (Yongding-He wells)."""
   # data_path = "C:\\Users\\ZMK\\Desktop\\GRU\永定河井.csv"
    timestep = 60  # sliding-window length: time steps fed to the GRU
    batch_size = 30  # mini-batch size
    feature_size = 8  # number of input features per time step
    hidden_size = 256  # GRU hidden state size
    output_size = 15  # forecast horizon: steps predicted per head
    num_layers = 2  # number of stacked GRU layers
    epochs = 100 # training epochs
    best_loss = 0 # best-loss record
    learning_rate = 0.0003 # AdamW learning rate
   # model_name = 'GRU_ZMK'  # model name
   # save_path = 'C://Users//ZMK//Desktop//GRU//{}.pth'.format(model_name)  # best-model save path
# Single shared configuration instance for this script.
config = Config()
def normalization(data,label):
    """Min-max scale features and labels independently.

    Returns the scaled arrays plus the fitted label scaler, which is needed
    later to inverse-transform predictions back to physical units.
    """
    feature_scaler = MinMaxScaler()
    label_scaler = MinMaxScaler()
    scaled_data = feature_scaler.fit_transform(data)
    scaled_label = label_scaler.fit_transform(label)
    return scaled_data, scaled_label, label_scaler
def split_windows(data,seq_len,output_size):
    """Slide a window of length seq_len over data one step at a time.

    Each sample pairs the window (all columns) with the next output_size
    rows restricted to columns 2+ (the label columns).
    Returns (x, y) as numpy arrays.
    """
    windows, targets = [], []
    n_windows = len(data) - seq_len - 1 - output_size
    for start in range(n_windows):
        stop = start + seq_len
        windows.append(data[start:stop, :])
        targets.append(data[stop:stop + output_size, 2:])
    print('split_windows_i:', start)
    print(windows[-1].shape, targets[-1].shape)
    x, y = np.array(windows), np.array(targets)
    print('x.shape,y.shape=\n', x.shape, y.shape)
    return x, y
def split_windows_long(data,seq_len,output_size):
    """Windowing with stride output_size, so consecutive targets tile the
    series without overlap.

    Each sample pairs a seq_len-row window (all columns) with the following
    output_size rows restricted to columns 2+ (the label columns).
    """
    print(len(data))
    inputs, labels = [], []
    window_count = int(len(data) / output_size) - 4
    for idx in range(window_count):
        start = idx * output_size
        inputs.append(data[start:start + seq_len, :])
        labels.append(data[start + seq_len:start + seq_len + output_size, 2:])
    print('split_windows_i:', idx)
    x, y = np.array(inputs), np.array(labels)
    print('x.shape,y.shape=\n', x.shape, y.shape)
    return x, y
def nash_sutcliffe_efficiency(y_true, y_pred):
    """Return the Nash-Sutcliffe Efficiency.

    1.0 means a perfect fit; 0.0 means no better than predicting the
    observed mean; negative values are worse than the mean predictor.
    """
    residual_ss = np.sum((y_true - y_pred) ** 2)
    variance_ss = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1 - residual_ss / variance_ss
# 7.定义GRU网络
class GRU(nn.Module):
    """Multi-head GRU: one shared recurrent stack followed by six linear
    heads, each predicting output_size future steps for one well."""

    def __init__(self, feature_size, hidden_size, num_layers, output_size):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        # Shared recurrent encoder over the input window.
        self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8, batch_first=True)
        # One output head per monitored well. Attribute names (fc1..fc6) and
        # creation order are kept for checkpoint compatibility.
        self.fc1 = nn.Linear(self.hidden_size, self.output_size)
        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
        self.fc3 = nn.Linear(self.hidden_size, self.output_size)
        self.fc4 = nn.Linear(self.hidden_size, self.output_size)
        self.fc5 = nn.Linear(self.hidden_size, self.output_size)
        self.fc6 = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, x, hidden=None):
        batch_size = x.size()[0]
        # Zero initial hidden state unless one is supplied.
        if hidden is None:
            state = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
        else:
            state = hidden
        output, state = self.gru(x, state)
        # Each head reads the encoder output; only the last time step is kept.
        heads = (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5, self.fc6)
        per_well = [head(output)[:, -1, :] for head in heads]
        # Shape: (batch, output_size, 6 wells).
        return torch.stack(per_well, dim=2)
model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size)  # build the GRU network
print(model)
loss_function = nn.MSELoss()  # loss function (unused: prediction only)
optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)  # optimizer (unused: prediction only)
# Load the trained Yongding-He checkpoint and switch to inference mode.
model.load_state_dict(torch.load('C://Users//ZMK//Desktop//GRU//GRU_YDH.pth'))
model.eval()
# Prediction input.
df_pre = pd.read_csv("C:\\Users\\ZMK\\Desktop\\GRU\\永定河井-pre.csv",parse_dates=["date"],index_col=[0])
print(df_pre.shape)
data_pre = df_pre.iloc[:,0:8]
label_pre = df_pre.iloc[:,7] # the label only fits the scaler used to invert predictions; it does not enter the computation
data_pre = data_pre.values
label_pre = label_pre.values.reshape(-1,1)
data_pre,label_pre,mm_y_pre = normalization(data_pre,label_pre)
dataX_pre,dataY_pre = split_windows_long(data_pre,config.timestep,config.output_size)
dataX_pre = Variable(torch.Tensor(np.array(dataX_pre)))
dataY_pre = Variable(torch.Tensor(np.array(dataY_pre)))
print(dataY_pre.shape)
test_pre = model(dataX_pre)
print(test_pre.shape)
with pd.ExcelWriter("C:\\Users\\ZMK\\Desktop\\GRU\\GRU-pre-ydh.xlsx", engine='openpyxl') as writer:
    for i in range(6):
        # Head i flattened across all windows and forecast steps.
        test_pre_data = test_pre[:,:,i].data.numpy().reshape(-1,1)
        y_test_pre = dataY_pre[:,:,i].data.numpy().reshape(-1,1)
        test_pre_data_inv = mm_y_pre.inverse_transform(test_pre_data)
        y_test_inv =mm_y_pre.inverse_transform(y_test_pre)
       # plt.figure(figsize=(10,5))
       # plt.plot(y_test_inv)
       # plt.plot(test_pre_data_inv)
       # plt.legend(('real', 'predict'),fontsize='15')
       # plt.show()
        print('MAE/RMSE/NSE')
        print(mean_absolute_error(y_test_inv, test_pre_data_inv))
        print(np.sqrt(mean_squared_error(y_test_inv, test_pre_data_inv)))
        print(nash_sutcliffe_efficiency(y_test_inv, test_pre_data_inv))
        y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}'])
        test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'pre Node {i+1}'])
        # Save each node's series to its own worksheet.
        # NOTE(review): predictions are written under the 'True Node' sheet
        # name and truths under 'pre Node' — looks swapped; confirm.
        test_pre_data_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False)
        y_test_inv.to_excel(writer, sheet_name=f'pre Node {i+1}', index=False)
GRU_zmk_pre.py
New file
@@ -0,0 +1,267 @@
# -*- coding: utf-8 -*-
"""
Created on Sun May 26 02:15:11 2024
@author: BDYGS
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
# import tushare as ts
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from torch.utils.data import TensorDataset
from tqdm import tqdm
from datetime import datetime
import DataTask
# Base directory holding the GRU checkpoints and CSV/XLSX data files
# (same folder expressed with the two path-separator styles used below).
path_dir_left ="C://Users//ZMK//Desktop//GRU//"
path_dir_right ="C:\\Users\\ZMK\\Desktop\\GRU\\"
class Config():
    """Hyper-parameters for the production GRU prediction service."""
   # data_path = "C:\\Users\\ZMK\\Desktop\\GRU\永定河井.csv"
    timestep = 60  # sliding-window length: time steps fed to the GRU
    batch_size = 30  # mini-batch size
    feature_size = 8  # number of input features per time step
    hidden_size = 256  # GRU hidden state size
    output_size = 15  # forecast horizon: steps predicted per head
    num_layers = 2  # number of stacked GRU layers
    epochs = 100 # training epochs
    best_loss = 0 # best-loss record
    learning_rate = 0.0003 # AdamW learning rate
   # model_name = 'GRU_ZMK'  # model name
   # save_path = 'C://Users//ZMK//Desktop//GRU//{}.pth'.format(model_name)  # best-model save path
# Single shared configuration instance for this module.
config = Config()
def normalization(data,label):
    """Min-max scale features and labels independently.

    Returns the scaled arrays plus the fitted label scaler, which is needed
    later to inverse-transform predictions back to physical units.
    """
    feature_scaler = MinMaxScaler()
    label_scaler = MinMaxScaler()
    scaled_data = feature_scaler.fit_transform(data)
    scaled_label = label_scaler.fit_transform(label)
    return scaled_data, scaled_label, label_scaler
def split_windows(data,seq_len,output_size):
    """Slide a window of length seq_len over data one step at a time.

    Each sample pairs the window (all columns) with the next output_size
    rows restricted to columns 2+ (the label columns).
    Returns (x, y) as numpy arrays.
    """
    windows, targets = [], []
    n_windows = len(data) - seq_len - 1 - output_size
    for start in range(n_windows):
        stop = start + seq_len
        windows.append(data[start:stop, :])
        targets.append(data[stop:stop + output_size, 2:])
    print('split_windows_i:', start)
    print(windows[-1].shape, targets[-1].shape)
    x, y = np.array(windows), np.array(targets)
    print('x.shape,y.shape=\n', x.shape, y.shape)
    return x, y
def split_windows_long(data,seq_len,output_size):
    """Windowing with stride output_size, so consecutive targets tile the
    series without overlap.

    Each sample pairs a seq_len-row window (all columns) with the following
    output_size rows restricted to columns 2+ (the label columns).
    """
    print(len(data))
    inputs, labels = [], []
    window_count = int(len(data) / output_size) - 4
    for idx in range(window_count):
        start = idx * output_size
        inputs.append(data[start:start + seq_len, :])
        labels.append(data[start + seq_len:start + seq_len + output_size, 2:])
    print('split_windows_i:', idx)
    x, y = np.array(inputs), np.array(labels)
    print('x.shape,y.shape=\n', x.shape, y.shape)
    return x, y
def nash_sutcliffe_efficiency(y_true, y_pred):
    """Return the Nash-Sutcliffe Efficiency.

    1.0 means a perfect fit; 0.0 means no better than predicting the
    observed mean; negative values are worse than the mean predictor.
    """
    residual_ss = np.sum((y_true - y_pred) ** 2)
    variance_ss = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1 - residual_ss / variance_ss
# 7.定义GRU网络
class GRU(nn.Module):
    """Multi-head GRU: one shared recurrent stack followed by six linear
    heads, each predicting output_size future steps for one well."""

    def __init__(self, feature_size, hidden_size, num_layers, output_size):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        # Shared recurrent encoder over the input window.
        self.gru = nn.GRU(feature_size, hidden_size, num_layers, dropout=0.8, batch_first=True)
        # One output head per monitored well. Attribute names (fc1..fc6) and
        # creation order are kept for checkpoint compatibility.
        self.fc1 = nn.Linear(self.hidden_size, self.output_size)
        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
        self.fc3 = nn.Linear(self.hidden_size, self.output_size)
        self.fc4 = nn.Linear(self.hidden_size, self.output_size)
        self.fc5 = nn.Linear(self.hidden_size, self.output_size)
        self.fc6 = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, x, hidden=None):
        batch_size = x.size()[0]
        # Zero initial hidden state unless one is supplied.
        if hidden is None:
            state = x.data.new(self.num_layers, batch_size, self.hidden_size).fill_(0).float()
        else:
            state = hidden
        output, state = self.gru(x, state)
        # Each head reads the encoder output; only the last time step is kept.
        heads = (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5, self.fc6)
        per_well = [head(output)[:, -1, :] for head in heads]
        # Shape: (batch, output_size, 6 wells).
        return torch.stack(per_well, dim=2)
#根据编号获取不同的预测模型
def getModelName(well_num):
    """Pick the checkpoint for a well: wells listed in DataTask.arr1 use the
    Yongding-He model, everything else falls back to the Yuquanshan model."""
    is_ydh_well = well_num in DataTask.arr1
    return 'GRU_YDH.pth' if is_ydh_well else 'GRU_YQS.pth'
#写入数据到csv
def write_csv(model_name , np_result, csv_path):
    """Dump a prediction input array to CSV with the column layout the
    chosen model expects.

    model_name: checkpoint file name ('GRU_YDH.pth' selects the Yongding-He
    column names, anything else the Yuquanshan ones).
    np_result: array whose columns 0..8 map onto the names below, in order.
    """
    if model_name == 'GRU_YDH.pth':
        columns = ["date", "Myrainfall", "flowrate", "LQWB", "ZSSC",
                   "WTY", "LSH", "HZZ", "GC"]
    else:
        columns = ["date", "HDrainfall", "flowrate", "SXC", "XM1",
                   "XM2", "SYSC", "SJYY", "BW"]
    frame = pd.DataFrame({name: np_result[:, i] for i, name in enumerate(columns)})
    frame.to_csv(csv_path, index=False)
    #运行预测模型
def runPredictModel(well_num):
    """Run the groundwater-level forecast for the well group containing
    well_num: fetch recent observations, write them to the model's input
    CSV, run the trained GRU and dump results to an Excel workbook."""
    data =""
    csv_path = ""
    excel_path = ""
    # Pick the checkpoint for this well.
    model_name = getModelName(well_num)
    # Yongding-He model.
    if model_name == 'GRU_YDH.pth':
        csv_path = path_dir_right + "永定河井-pre.csv"
        excel_path = path_dir_right +  "永定河井-预测结果.xlsx"
        data = DataTask.get_ydh15_real_data()
    else:
        csv_path = path_dir_right + "玉泉山井-pre.csv"
        excel_path =  path_dir_right +  "玉泉山井-预测结果.xlsx"
        data = DataTask.getyqs15_real_data()
    # Write the observations to the CSV consumed below.
    write_csv(model_name,data,csv_path)
    model_path =   path_dir_left  + model_name
    model = GRU(config.feature_size, config.hidden_size, config.num_layers, config.output_size)  # build the GRU network
    loss_function = nn.MSELoss()  # loss function (unused: prediction only)
    optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)  # optimizer (unused: prediction only)
    model.load_state_dict(torch.load(model_path))
    model.eval()
    # CSV data path, e.g.
    #  "C:\\Users\\ZMK\\Desktop\\GRU\\永定河井-pre.csv"
    df_pre = pd.read_csv(csv_path , parse_dates=["date"],index_col=[0])
    data_pre = df_pre.iloc[:,0:8]
    label_pre = df_pre.iloc[:,7] # the label only fits the scaler used to invert predictions; it does not enter the computation
    data_pre = data_pre.values
    label_pre = label_pre.values.reshape(-1,1)
    data_pre,label_pre,mm_y_pre = normalization(data_pre,label_pre)
    dataX_pre,dataY_pre = split_windows_long(data_pre,config.timestep,config.output_size)
    dataX_pre = Variable(torch.Tensor(np.array(dataX_pre)))
    dataY_pre = Variable(torch.Tensor(np.array(dataY_pre)))
    test_pre = model(dataX_pre)
    with pd.ExcelWriter( excel_path ,  engine='openpyxl') as writer:
        for i in range(6):
            # Head i flattened across all windows and forecast steps.
            test_pre_data = test_pre[:,:,i].data.numpy().reshape(-1,1)
            y_test_pre = dataY_pre[:,:,i].data.numpy().reshape(-1,1)
            test_pre_data_inv = mm_y_pre.inverse_transform(test_pre_data)
            y_test_inv =mm_y_pre.inverse_transform(y_test_pre)
            y_test_inv = pd.DataFrame(y_test_inv, columns=[f'True Node {i+1}'])
            test_pre_data_inv = pd.DataFrame(test_pre_data_inv, columns=[f'pre Node {i+1}'])
            # Save each node's series to its own worksheet.
            # NOTE(review): predictions go to the 'True Node' sheet and truths
            # to 'pre Node' — the sheet names look swapped; confirm.
            test_pre_data_inv.to_excel(writer, sheet_name=f'True Node {i+1}', index=False)
            y_test_inv.to_excel(writer, sheet_name=f'pre Node {i+1}', index=False)
MainAPI.py
@@ -4,21 +4,21 @@
from flask import jsonify
from flask import request
from flask_cors import CORS
import sys
import numpy as np
import pandas as pd
import flopy
import flopy.utils.binaryfile as bf
import csv
import time 
from openpyxl import load_workbook
import os 
import shutil
import json
import Base as base 
import CalHead
import Predict
import json
import ModelPeriod
import AchiveReport as achiveReport
import BigData
import OpenExcel
import DataTransf
import DataTask
# Flask函数接收一个参数__name__,它会指向程序所在的包
app = Flask(__name__)
@@ -33,11 +33,13 @@
#河流cell的数量
riverCellSize = 109
iboundGroupSize = 5
iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240]}
iboundGroupSize = 10
iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240],
             6:[241,282],7:[283,354],8:[355,393],9:[394,436],10:[437,453]}
iboundGroup3Size = 5
iboundGroup3={1:[241,282],2:[283,354],3:[355,393],4:[394,436],5:[437,453]}
iboundName =["西侧流入","北部流入","东北部流入","东部流出","南部流出",
             "山区流入","永定河断裂流入","黄庄高丽断裂流入","八宝山断裂流入","昆明湖断裂入流"]
riverGroupSize = 4
riverGroup={1:[454,479],2:[480,505],3:[506,527],4:[528,562]}
@@ -49,7 +51,7 @@
def getModel(model_name):
    model_ws=""
    if not model_name:
        model_ws = "202001_202212"
        model_ws = base.not_allowed_model
    else:
         model_ws = base.model_dir + model_name
@@ -71,8 +73,31 @@
    
    months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
    dict["months"]=months
    print(jsondata)
    if "initHeader" in jsondata:
        dict["initHead"] = jsondata["initHeader"]
    else:
        dict["initHead"] = ""
    return jsonify(dict)
#保存初始水头
@app.route('/saveInitHead', methods=['POST'])
def saveInitHead():
    jsondata = request.get_json()
    model_name = str(jsondata['model_name'])
    initHeader =  str(jsondata['initHead'])
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return jsonify("本模型为验证模型,不允许修改!")
    jsondata= CalHead.get_model_json(model_name)
    jsondata["initHeader"] = initHeader
    jpath = base.model_dir + model_name +"\\prediction.json"
    with open(jpath, "w",encoding='utf-8') as outfile:
        json.dump(jsondata, outfile,ensure_ascii=False)
    return jsonify("保存初始水头成功!")
#读取wel文件 å‚数为 Period
@app.route('/welList/', methods=['GET'])
@@ -97,8 +122,13 @@
    result_len = len(result)
    
    ibound_segment={}
    if layerparam == '1':
             #边界
        ibound_segment={"1":[0,85],"2":[86,110],"3":[111,141],"4":[142,169],"5":[170,239]}
        for i in range(0, 240):
            iboundarray.append(result[i])
     #河流
@@ -114,12 +144,17 @@
            welarray.append(result[i])
            
    elif layerparam == '3':
        ibound_segment={"6":[0,41],"7":[42,113],"8":[114,152],"9":[153,195],"10":[196,212]}
        for i in range(240, 453):
            iboundarray.append(result[i])
    
    ibounddict = {"name": "ibound", "data": iboundarray,"segment":ibound_segment}
    
    ibounddict = {"name": "ibound", "data": iboundarray}
    riverdict = {"name": "river", "data": riverarray}
    riversgement={"1":[0,25],"2":[26,51],"3":[52,73],"4":[74,108]}
    riverdict = {"name": "river", "data": riverarray,"segment":riversgement}
    weldict = {"name": "wel", "data": welarray}
@@ -128,6 +163,7 @@
    data.append(ibounddict)
    data.append(weldict)
    return jsonify(data)
#读取单个井的数据
@app.route('/wel/', methods=['GET'])
@@ -169,6 +205,9 @@
    # model_name = request.args.get('model_name')  
    model_name = str(json['model_name'])
    
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return jsonify("本模型为验证模型,不允许修改!")
    ml= getModel(model_name)
    
@@ -199,119 +238,19 @@
            
        lrcq[per] = array2d 
    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,dtype=ml.wel.dtype,
                                 options=ml.wel.options,
                                 stress_period_data=lrcq)
    ml.write_input()
    
    return jsonify("数据更新完毕!")
#读入电子表格文件生成wel æ–‡ä»¶
@app.route('/cellFileInput', methods=['POST'])
def cellFileInput():
    path ='C:\\Users\\ZMK\\Desktop\\待发送\\cell文件.xlsx'
    data = get_cell_data(path)
    lrcq= get_cell_struct(data["excel1"],data["excel2"],data["excel3"])
    model_name = request.args.get('model_name')
    ml= getModel(model_name)
    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
    ml.write_input()
    return jsonify("sucess")
def get_cell_struct(excel1,excel2,excel3):
     lrcq={}
     #周期数量
     period = 7
     start_row_index = 1
     #边界数据 excel
     for col in range (0,period):
         array =[]
         for row in range(start_row_index, len(excel1)):
             arr = [excel1[row][2]-1,excel1[row][3]-1,excel1[row][4]-1,excel1[row][6+col]]
             array.append(arr)
         lrcq[col]= array
     #河流数据 excel
     for col in range (0,period):
         array =[]
         for row in range(start_row_index, len(excel2)):
             arr = [excel2[row][2]-1,excel2[row][3]-1,excel2[row][4]-1,excel2[row][6+col]]
             array.append(arr)
         lrcq[col].extend(array)
      #抽水数据 excel
     for col in range (0,period):
          array =[]
          for row in range(start_row_index, len(excel3)):
              arr = [excel3[row][1]-1,excel3[row][2]-1,excel3[row][3]-1,excel3[row][8+col]]
              array.append(arr)
          lrcq[col].extend(array)
     return lrcq
#获取cell文件
#file_path æ–‡ä»¶çš„路径
def get_cell_data(file_path):
    workbook = load_workbook(file_path)
    sheetnames = workbook.get_sheet_names()
    #read  first  sheet
    sheet1 = workbook[sheetnames[0]]
    sheet2 = workbook[sheetnames[1]]
    sheet3 = workbook[sheetnames[2]]
    excel1 =[]
    excel2=[]
    excel3=[]
    # éåŽ†è¯»å–æ•´ä¸ªå·¥ä½œè¡¨
    for row in sheet1.iter_rows(values_only=True):
        array=[]
        for cell in row:
            array.append(cell)
        excel1.append(array)
    for row in sheet2.iter_rows(values_only=True):
        array=[]
        for cell in row:
            array.append(cell)
        excel2.append(array)
    for row in sheet3.iter_rows(values_only=True):
        array=[]
        for cell in row:
            array.append(cell)
        excel3.append(array)
    # å…³é—­Excel文件
    workbook.close()
    data={"excel1":excel1,"excel2":excel2,"excel3":excel3}
    return data
#读取wel文件 å‚数为 Period
@app.route('/iboundList/', methods=['GET'])
def iboundList():
    return jsonify(iboundGroupSize)
    return jsonify(iboundName)
#边界的分组数据
@@ -360,6 +299,10 @@
    data = json['data']
    
    model_name = json['model_name']
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return jsonify("本模型为验证模型,不允许修改!")
    ml= getModel(model_name)
    
    index = iboundGroup[no]
@@ -392,7 +335,10 @@
            
        lrcq[per] = array2d 
    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
                                 dtype=ml.wel.dtype,
                                 options=ml.wel.options,
                                 stress_period_data=lrcq)
    ml.write_input()
    return jsonify("数据更新完毕!")
@@ -458,6 +404,9 @@
    end_index = index[1]
    model_name = json['model_name']
    
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return jsonify("本模型为验证模型,不允许修改!")
    ml= getModel(model_name)
    
    periods =CalHead.get_model_period(model_name)
@@ -487,7 +436,11 @@
            
        lrcq[per] = array2d 
    flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
    flopy.modflow.ModflowWel(ml,ipakcb= ml.wel.ipakcb,
                                 dtype=ml.wel.dtype,
                                 options=ml.wel.options,
                                 stress_period_data=lrcq)
    ml.write_input()
    return jsonify("数据更新完毕!")
@@ -529,55 +482,14 @@
    return jsonify(result)
#降水数据修改
# @app.route('/precipitationInput', methods=['POST'])
# def precipitationInput():
#     json = request.get_json()
#     model_name= str(json['model_name'])
#     period = int(json['period'])
#     #有序的应力周期列表 json
#     data = json['data']
#     dict = {}
#     for i in range(len(data)):
#         q1 = data[i]['Q1']
#         q2 = data[i]['Q2']
#         dict[q1] = q2
#     ml= getModel(model_name)
#     item = ml.rch.rech.__getitem__(kper=period)
#     array2d = item.get_value()
#     count = 0
#     array2d_len = len(array2d)
#     for i in range(array2d_len):
#         array_len = len(array2d[i])
#         for j in range(array_len):
#             va = str(array2d[i][j])
#             if va in dict:
#                 count += 1
#                 array2d[i][j] = float(dict[va])
#     ml.rch.rech.__setitem__(key=period, value=array2d)
#     rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech)
#     rch.write_file(check=False)
#     #ml.write_input()
#     return jsonify("降水参数修改完毕!")
@app.route('/precipitationInput', methods=['POST'])
def precipitationInput():
     
    json = request.get_json()
    model_name= str(json['model_name'])
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return jsonify("本模型为验证模型,不允许修改!")
    period = int(json['period'])
    #有序的应力周期列表 json
    data = json['data']
@@ -597,7 +509,7 @@
        
        tuples= areas[key]
        zblen= len(tuples)
        values = float(dict[key])
        values = round(float(dict[key]),8)
        for i in range(zblen):
            x = tuples[i][0]
            y = tuples[i][1]
@@ -605,106 +517,14 @@
        
    ml.rch.rech.__setitem__(key=period, value=array2d)
    
    rch = flopy.modflow.ModflowRch(ml, rech = ml.rch.rech)
    rch = flopy.modflow.ModflowRch(ml,nrchop=ml.rch.nrchop,
                                       ipakcb=ml.rch.ipakcb,
                                       rech=ml.rch.rech,
                                       irch =ml.rch.irch)
    rch.write_file(check=False)
    # ml.write_input()
    return jsonify("降水参数修改完毕!")
#导入csv文件
@app.route('/precipitationInputFile', methods=['POST'])
def precipitationInputFile():
    model_name = request.args.get('model_name')
    ml= getModel(model_name)
    save_path = 'C:/Users/ZMK/Desktop/test1/' + "1111.xlsx"
    file = request.files.get('file')
    if file:
        file.save(save_path)
        #获取读取的excel è¡¨æ ¼æ•°æ®
        stations = get_station_struct(save_path)
        #循环周期
        #perd周期变量
        #array2d æ¯ä¸ªå‘¨æœŸçš„二维数组
        for perd in range(0,36):
            period = perd
            item = ml.rch.rech.__getitem__(kper=period)
            array2d = item.get_value()
            array2d_len = len(array2d)
            count = 0
            #循环站点将一个period的所有stations进行字典存储
            dict = {}
            for k in range(0,len(stations)):
                row = stations[k]["row"]
                column = stations[k]["column"]
                data_old = array2d[row][column]
                data_new = stations[k]["data"][perd]
                dict[data_old]= data_new
            #循环设置每个period çš„值
            for i in range(array2d_len):
              array_len = len(array2d[i])
              for j in range(array_len):
                  va = str(array2d[i][j])
                  if va in dict:
                      array2d[i][j] = float(dict[va])
            #将array2d重新set å¯¹åº”çš„ item å‘¨æœŸå†…
            ml.rch.rech.__setitem__(key=period, value=array2d)
        rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech)
        rch.write_file(check=False)
        # ml.write_input()
        return '文件上传成功'
    else:
        return '上传失败,未选择文件'
#获取站点的数据,构造数据结构
#file_path æ–‡ä»¶çš„路径
def get_station_struct(file_path):
    workbook = load_workbook(file_path)
    sheetnames = workbook.get_sheet_names()
    #read  first  sheet
    sheet = workbook[sheetnames[0]]
    array2d_excel=[]
    # éåŽ†è¯»å–æ•´ä¸ªå·¥ä½œè¡¨
    for row in sheet.iter_rows(values_only=True):
        array=[]
        for cell in row:
            array.append(cell)
        array2d_excel.append(array)
    # å…³é—­Excel文件
    workbook.close()
    #数据的开始下标
    data_start_index=6
    #信息的开始行号
    start_row_index = 1
    #存储站点信息
    stations = []
    for i in range (start_row_index,len(array2d_excel)):
        st={"name":array2d_excel[i][1],"row":array2d_excel[i][4],"column":array2d_excel[i][5]}
        data=[]
        for j in range(data_start_index,len(array2d_excel[i])):
            cell_data = array2d_excel[i][j]
            cell_data= cell_data/100/30*0.15
            data.append(round(cell_data, 6))
        st["data"]= data
        stations.append(st)
    return stations
    
#运行模型
@@ -712,22 +532,30 @@
def runModel():
    model_name = request.args.get('model_name')
    
    msg= Predict.run_model_predict(model_name)
    #导出csv文件
    csvpath = CalHead.exportCsV(model_name)
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return jsonify("本模型为验证模型,不允许修改!")
    
    #更新模型三维网格配置
    dicts= Predict.run_model_predict(model_name)
    if dicts["code"] == 400:
        return dicts["msg"]
    #导出csv文件
    CalHead.exportCsV(model_name)
    # #更新模型三维网格配置
    base.updateModelConfig(model_name)
    
    #创建模型的三维网格
    # #创建模型的三维网格
    filedir = base.model3d_path + model_name
    
    print(filedir)
    if not os.path.exists(filedir):
        os.makedirs(filedir, exist_ok=True)
        base.callModelexe()
        
    return jsonify(msg)
    base.callModelexe()
    #计算水资源量和水均衡
    CalHead.run_zonebudget_bal(model_name)
    CalHead.run_zonebudget_res(model_name)
    return jsonify(dicts["msg"])
#生成模型csv æ–‡ä»¶
@app.route('/runModelCsv/', methods=['GET'])
@@ -739,23 +567,6 @@
    return jsonify(result)
#初始水位信息
@app.route('/initWater/', methods=['GET'])
def initWater():
    period = request.args.get('period')
    per = int(period)
    model_name = request.args.get('model_name')
    ml= getModel(model_name)
    item = ml.rch.rech.__getitem__(kper=per)
    value = item.get_value()
    t = np.array(value).tolist()
    return jsonify(t)
#创建新模型
@app.route('/saveModel/', methods=['GET'])
def saveModel():
@@ -763,29 +574,65 @@
    modelname = request.args.get('name')
    startTime = request.args.get('startTime')
    endTime = request.args.get('endTime')
    remark = request.args.get('remark')
    file_list = os.listdir(base.model_dir)
    for name in file_list:
        if name == modelname:
             return jsonify("模型名称已经存在,不允许重复创建!")
    
    pers = ModelPeriod.get_months_in_range_count(startTime,endTime)
    if pers > 60 :
       return jsonify("模型创建失败,最多只允许60个周期的连续预测!")
    dir = base.model_dir + modelname  
    if pers==12:
    shutil.copytree(base.predictModel,dir) 
    else:
        #如果不是一年的预测, å¯èƒ½ä¸º å…¶ä»–的多周期的预测 å¤§äºŽ 12 ä¸ªå‘¨æœŸæˆ–者 å°äºŽ12个周期
        #首先把60个周期的当做模型clone一份, ç„¶åŽä¿®æ”¹ dis wel rch æ–‡ä»¶
        #wel æ–‡ä»¶å’Œ rch æ–‡ä»¶ä¸éœ€è¦ä¿®æ”¹
        shutil.copytree(base.predictModel60,dir)
        Predict.updateDisFile(modelname,pers)
    
    jsondata={"model_name":modelname,"start_time":startTime,"end_time":endTime}
    predictionJson = base.model_dir + modelname +"\\prediction.json"
    with open(predictionJson, "w",encoding='utf-8') as outfile:
        json.dump(jsondata, outfile,ensure_ascii=False)
        
    CalHead.addModelJson(modelname, startTime, endTime, remark)
    return jsonify("创建新模型完毕!")
#创建新模型
@app.route('/ModelList/', methods=['GET'])
def ModelList():
    file_list = os.listdir(base.model_dir)
    return jsonify(file_list)
#模型列表2
@app.route('/ModelList2/', methods=['GET'])
def ModelList2():
    model_path = base.prefix  +"\\model_list.json"
    model_lsit=""
    with open(model_path,encoding='utf-8') as f:
             model_lsit = json.load(f)
    return jsonify(model_lsit)
#删除模型
@app.route('/deleteModel/', methods=['GET'])
def deleteModel():
    model_name = request.args.get('model_name')
    if not model_name:
         return jsonify({"code":400,"msg":"不允许删除空目录!"})
    if model_name == base.not_allowed_model:
         return jsonify({"code":400,"msg":"验证模型不允许删除!"})
    paths = base.model_dir + model_name
    shutil.rmtree(paths)
    CalHead.removeModelJson(model_name)
    return jsonify({"code":200,"msg":"模型删除完毕!"})
#预测场景参数
@app.route('/prediction', methods=['POST'])
@@ -801,7 +648,10 @@
    with open(predictionJson, "w",encoding='utf-8') as outfile:
        json.dump(jsondata, outfile,ensure_ascii=False)
           
    return jsonify("保存预测场景参数完毕!")
    #运行模型
    dicts = Predict.run_model(model_name)
    return jsonify(dicts["msg"])
#预测场景参数
@@ -809,6 +659,7 @@
def predictionparam():
    model_name = request.args.get('model_name')       
    print(model_name)
    file_list = os.listdir(base.model_dir)
    if model_name not in file_list:
        return jsonify("模型不存在!")
@@ -862,28 +713,31 @@
def pump_importdata():
            
    model_name = request.form.get('model_name')   
    # ml= getModel(model_name)
    types = request.form.get('type')
    file = request.files.get('file')
    print(types)
    
    save_path = base.model_dir + model_name +"\\extra_cell.xlsx"
    save_path = base.model_dir + model_name +"\\"+types+".xlsx"
    print(save_path)
   
    if file: 
        file.save(save_path)
    
    resultDict={"code":200,"msg":"保存数据完毕!"}
    data= DataTransf.base_excel(model_name,types)
    resultDict={"code":200,"msg":"保存数据完毕!","data":data}
    return jsonify(resultDict)
#观测井列表
@app.route('/obsWellList', methods=['GET'])
def obsWellList():
    obswell= base.obs_well
    dict =[]
    for name , row ,column in obswell:
        obj ={"name":name,"row":row,"column":column,"Layer":1}
        dict.append(obj)
    dicts =[]
    for wellId, name , row ,column in obswell:
        obj ={"wellId":wellId,"name":name,"row":row,"column":column,"Layer":1}
        dicts.append(obj)
    
    return jsonify(dict)
    return jsonify(dicts)
    
 
#观测井chart数据接口
@@ -892,8 +746,9 @@
    model_name = request.args.get('model_name') 
    row = request.args.get('row')  
    column = request.args.get('column')  
    wellId = request.args.get('wellId')
    result = CalHead.obsChartdata(model_name, row, column)
    result = CalHead.obsChartdata(wellId,model_name, row, column)
   
    return jsonify(result)
@@ -904,8 +759,8 @@
    base_year = request.args.get('base_year') 
    start_time = request.args.get('start_time')  
    end_time = request.args.get('end_time')  
    return jsonify(Predict.predict_river_chart(base_year, start_time, end_time))
    value = float(request.args.get('value'))
    return jsonify(Predict.predict_river_chart(base_year, start_time, end_time,value))
#预测页面 é™æ°´å›¾è¡¨
@app.route('/predictWaterChart', methods=['GET'])
@@ -913,15 +768,57 @@
    base_year = request.args.get('base_year') 
    start_time = request.args.get('start_time')  
    end_time = request.args.get('end_time') 
    return jsonify(Predict.predict_water_chart(base_year, start_time, end_time))
    value = float(request.args.get('value'))
    return jsonify(Predict.predict_water_chart(base_year, start_time, end_time,value))
@app.route('/heatmap', methods=['GET'])
def heatmap():
#开采量的折线图
@app.route('/predictWellChart', methods=['POST'])
def predictWellChart():
    json = request.get_json()
    base_year = str(json['base_year'])
    start_time = json['start_time']
    end_time = json['end_time']
    data = json['data']
    return jsonify(Predict.predict_well_chart(base_year, start_time, end_time,data))
#流场图
@app.route('/flowField', methods=['GET'])
def flowField():
    model_name = request.args.get('model_name') 
    period = request.args.get('period')
    data = CalHead.heatmapdata(model_name,period)
    maximum_value = np.max(data)
    return jsonify(np.array(data).tolist())
    flowStartTime = int(request.args.get('flowStartTime'))
    flowEndTime=int(request.args.get('flowEndTime'))
    flowType= request.args.get('flowType')
    # layer = int(request.args.get('layer') )
    pic = str(int(time.time())) +".png"
    outpath = base.flow_file + pic
    dicts={}
    if flowType=="水位":
        dicts = achiveReport.flow_field(model_name, flowStartTime, 0,"流场信息", "online", outpath)
    if flowType=="变幅":
        dicts = achiveReport.getFlowFieldBF(model_name,flowStartTime,flowEndTime,outpath)
    if flowType=="埋深":
       dicts = achiveReport.getFlowFieldDepth(model_name,flowStartTime, 0,"流场信息", "online", outpath)
    dicts["path"] = "/xishan/xinshanFlow/"+pic
    return jsonify(dicts)
#初始水位流场信息
@app.route('/initWaterFlow/', methods=['GET'])
def initWater():
    model_name = request.args.get('model_name')
    layer = int(request.args.get('layer') )
    pic = str(int(time.time())) +".png"
    outpath = base.flow_file + pic
    result=  achiveReport.init_flow_field(model_name, layer,"初始流场信息", "online", outpath)
    if result == "#":
         return jsonify("")
    return jsonify("/xishan/xinshanFlow/"+pic)
#降水均衡
@@ -935,15 +832,104 @@
#地球页面数据
@app.route('/earthWaterChart', methods=['GET'])
def earthWaterChart():
    model_name = request.args.get('model_name')
    indexId = int(request.args.get('index_id')) 
    data =  CalHead.earthWaterChart("202001_202212",indexId)
    data =  CalHead.earthWaterChart(model_name,indexId)
    return jsonify(data)
#渗透系数
@app.route('/mdLpf', methods=['GET'])
def mdLpf():
    lf = base.md_lpf
    return jsonify(np.array(lf).tolist())
#水资源量
@app.route('/water_res', methods=['GET'])
def water_res():
    model_name = request.args.get('model_name')
    #水均衡
    path1=base.muiltyModel + model_name +"\\water_bal.txt"
    bal = CalHead.water_balance(model_name, path1)
    path2=base.muiltyModel + model_name +"\\water_res.txt"
    res =  CalHead.water_res(model_name,path2)
    dicts =  BigData.mergeWaterData(bal,res)
    initFlowPNG = achiveReport.getWaterResFiled(model_name,0)
    dicts["initFlowPNG"]= "/xishan/xinshanFlow/" + initFlowPNG
    FlowPNG2 = achiveReport.getWaterResFiled(model_name,11)
    dicts["lastFlowPNG"]= "/xishan/xinshanFlow/" + FlowPNG2
    dicts["initMonth"] ="2023-01"
    dicts["lastMonth"] ="2023-12"
    #水资源量
    return dicts
#多模型地下水位
@app.route('/water_depth', methods=['GET'])
def water_depth():
    model_name = request.args.get('model_name')
    result = CalHead.water_depth(model_name)
    #水资源量
    return result
#小场景 å•个模型水位 å’Œå˜å¹…
@app.route('/xs_depth', methods=['GET'])
def xs_depth():
    model_name = request.args.get('model_name')
    res = CalHead.xs_depth(model_name)
    jsondata= CalHead.get_model_json(model_name)
    start_time = jsondata["start_time"]
    end_time = jsondata["end_time"]
    months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
    res["months"] = months
    #水资源量
    return res
#保存监测站降雨量
@app.route('/sensor_jyl_list', methods=['GET'])
def sensor_jyl_list():
    model_name = request.args.get('model_name')
    data = OpenExcel.read_excel(model_name)
    return jsonify(data)
#保存监测站降雨量
@app.route('/sensor_jyl_save', methods=['POST'])
def sensor_jyl_save():
    json = request.get_json()
    model_name = str(json['model_name'])
    data = json['data']
    OpenExcel.write_excel(model_name,data)
    return jsonify("保存完毕")
#月报内容
@app.route('/xs_month_report', methods=['GET'])
def xs_month_report():
    model_name = request.args.get('model_name')
    per = int(request.args.get('period'))
    res = achiveReport.archive_report_content(model_name,per)
    return res
#实时数据
@app.route('/xs_real_data', methods=['GET'])
def xs_real_data():
    num = request.args.get('num')
    start_time = request.args.get('start_time')
    end_time = request.args.get('end_time')
    types = request.args.get('types')
    res = DataTask.get_data(types,num,start_time,end_time)
    return jsonify(res)
    
if __name__ == '__main__':
    #app.run()    # å¯ä»¥æŒ‡å®šè¿è¡Œçš„主机IP地址,端口,是否开启调试模式
    app.run(host="192.168.0.122", port=5000)
    app.run(host="localhost", port=5000)
ModelPeriod.py
@@ -7,10 +7,6 @@
from datetime import datetime 
import calendar
from dateutil import rrule
import json
import Base as base
import os
#根据年月日获取最后一天的日期
@@ -26,7 +22,8 @@
    last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1)
    return last_day
    
def get_months_in_year():
    return ["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月",]
def get_months_in_range_ym(start_time, end_time):  
OpenExcel.py
New file
@@ -0,0 +1,61 @@
from openpyxl import load_workbook
import os
import shutil
import Base as base
def save_excel(model_name):
   dst = base.model_dir + model_name +"\\jyl.xlsx"
   shutil.copyfile(base.prefix +"雨量站输入模板.xlsx",dst)
#读取excel模板数据
def read_excel(model_name):
    paths =  base.model_dir + model_name +"\\jyl.xlsx"
    data=[]
    if not os.path.exists(paths):
        return data
    wb = load_workbook(filename = paths)
    ws = wb[wb.sheetnames[1]]
    for row in ws.iter_rows():
       tmp =[]
       for cell in row:
          tmp.append(cell.value)
       data.append(tmp)
    wb.close()
    return data
#读取降雨量的计算文件
def read_jyl_excel(model_name):
    paths =  base.model_dir + model_name +"\\jyl.xlsx"
    data=[]
    if not os.path.exists(paths):
        return data
    wb = load_workbook(filename = paths,data_only=True)
    ws = wb[wb.sheetnames[2]]
    for row in ws.iter_rows():
       tmp =[]
       for cell in row:
          tmp.append(cell.value)
       data.append(tmp)
    wb.close()
    return data
def write_excel(model_name,data):
    paths =  base.model_dir + model_name +"\\jyl.xlsx"
    if not os.path.exists(paths):
        save_excel(model_name)
    wb = load_workbook(filename = paths)
    ws = wb[wb.sheetnames[1]]
    for i in range(len(data)):
        for j in range(len(data[i])):
            ws.cell(row=i+1, column=j+1).value = data[i][j]
    wb.save(paths)
    wb.close()
Predict - ¸±±¾.py
File was deleted
Predict.py
@@ -1,23 +1,14 @@
# å¯¼å…¥Flaskç±»
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS
import sys
import numpy as np
import pandas as pd
import flopy
import flopy.utils.binaryfile as bf
import csv
import time
from openpyxl import load_workbook
import os
import shutil
import json
import Base as base
import CalHead
import ModelPeriod
import OpenExcel
base_init_year=["2020","2021","2022"]
@@ -30,16 +21,15 @@
#降水量
base_water = base.prefix + 'base_water.ini'
def predict_water_chart(base_year,start_time ,end_time):
def predict_water_chart(base_year,start_time ,end_time,value):
     water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8')
     y_data=[]
     x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
     water= water_array[0]
     for e in water:
         y_data.append(e)
     x_data= ModelPeriod.get_months_in_year()
     water= water_array[int(base_year)]
     for data in water:
         float_data = float(data)/9
         float_data= round(float_data*value,2)
         y_data.append(float_data)
    
     result = {"y_data": y_data, "x_data": x_data}
     return result
@@ -47,18 +37,71 @@
#河流的折线图
base_river = base.prefix + 'base_river.ini'
def predict_river_chart(base_year,start_time ,end_time):
def predict_river_chart(base_year,start_time ,end_time,value):
     river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8')
     y_data=[]
     x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
     for e in river_array:
         y_data.append(e)
     x_data= ModelPeriod.get_months_in_year()
     for data in river_array:
         #万立方米
         float_data = float(data)/4/10000
         float_data= round(float_data*value,2)
         y_data.append(float_data)
         
     result = {"y_data": y_data, "x_data": x_data}
     return result
#开采量折线图
base_mining = base.prefix + 'base_mining.ini'
def predict_well_chart(base_year,start_time ,end_time,data):
     #   2022/海淀/昌平/门头沟/石景山
     area_names=["全部区域","海淀区","昌平区","门头沟区","石景山区"]
     mining_array = np.loadtxt(base_mining, dtype=str,encoding='utf-8')
     x_data= ModelPeriod.get_months_in_year()
     result = {"x_data": x_data}
     y_data=[]
     for item in data:
         name = item["area_name"]
         value = float(item["value"])
         if name =='朝阳区':
             continue
         index = area_names.index(name)
         row_data = mining_array[index]
         float_data=[]
         for item in row_data:
             x = round(float(item)/10000*value,2)
             float_data.append(x)
         dicts={"name":name,"data":float_data}
         y_data.append(dicts)
     result["y_data"] = y_data
     return result
def run_model(model_name):
    if model_name == base.not_allowed_model or model_name in base.archive_models:
        return "本模型为验证模型,不允许修改!"
    dicts= run_model_predict(model_name)
    if dicts["code"] == 400:
        return dicts
    #导出csv文件
    CalHead.exportCsV(model_name)
    # #更新模型三维网格配置
    base.updateModelConfig(model_name)
    # #创建模型的三维网格
    filedir = base.model3d_path + model_name
    if not os.path.exists(filedir):
        os.makedirs(filedir, exist_ok=True)
    base.callModelexe()
    #计算水资源量和水均衡
    CalHead.run_zonebudget_bal(model_name)
    CalHead.run_zonebudget_res(model_name)
    dicts= {"code":200,"msg":"预测模型运行成功!" }
    return dicts
def run_model_predict(model_name):
@@ -69,21 +112,29 @@
        with open(prediction_path,encoding='utf-8') as f:
             predictiondata = json.load(f)
    
    periods =CalHead.get_model_period(model_name)
    periods_len= len(periods)
    if predictiondata:
        updateDisFile(model_name,periods_len)
        try:
           flag =  updateBase6File(model_name,predictiondata)
           if bool(flag)==False:
                dicts= {"code":400,"msg":"请检查模型的初始水是否设置!" }
                return  dicts
        except:
             dicts= {"code":400,"msg":"请检查模型的初始水是否设置!" }
             return  dicts
                    
        try:
            updateDisFile(model_name,predict_per)
            updateBase6File(model_name,predictiondata)
            updateRchFile(model_name,predictiondata)
        except:
            print("RchFile无预测参数,无需修改!")
            
        try:
            updateRiverFile(model_name,predictiondata)
        except:
            return "请检查初始水头、降水量、永定河入渗量、开采量等参数是否填写完整!"
            print("RiverFile无预测参数,无需修改!")
    else:
        print("prediction.json é¢„测场景文件为空,无需更改相应文件")
    
@@ -93,7 +144,8 @@
    ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
                                     exe_name="mf2005", verbose=True,  version="mf2005", check=False)   
    ml.run_model(report = True)
    return "预测模型运行成功!"
    dicts= {"code":200,"msg":"预测模型运行成功!" }
    return dicts
    
@@ -142,11 +194,21 @@
        
        rain_ratio = float(predictiondata["rain"]["ratio"])
        rain_base_year = predictiondata["rain"]["base_year"]
        if rain_base_year=='4':
            rain_base_year="1"
        
        river_ratio= float(predictiondata["river"]["ratio"])
        area= predictiondata["mine"]["area"] 
        
        # ws = base.predictParamModel + rain_base_year
        ws=""
        start_time = predictiondata["start_time"]
        end_time =  predictiondata["end_time"]
        count = ModelPeriod.get_months_in_range_count(start_time, end_time)
        if count==12:
        ws = base.predictParamModel + rain_base_year
        else:
             ws=  base.predictModel60
        
        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws,
                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
@@ -161,7 +223,10 @@
        
        lrcq = {}
        
        for per in range(predict_per):
        periods =CalHead.get_model_period(model_name)
        periods_len= len(periods)
        for per in range(periods_len):
            wel = [] 
            array2d = [] 
            
@@ -188,14 +253,21 @@
                array = [Layer, Row, Column, Q]
                array2d.append(array)
                
            #追加井
            flex_data= getFlexdata(model_name)
            print("==============")
            print(flex_data)
    
            for i in range(len(flex_data)):
                array2d.append(flex_data[i])
          
            lrcq[per] = array2d 
            
        flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
        flopy.modflow.ModflowWel(updateMdoel,
                                 ipakcb= baseMdoel.wel.ipakcb,
                                 dtype=baseMdoel.wel.dtype,
                                 options=baseMdoel.wel.options,
                                 stress_period_data=lrcq)
        updateMdoel.write_input()          
                               
    else:     
@@ -227,10 +299,29 @@
    if flag == "true":
         #丰水年 æž¯æ°´å¹´
        base_year = predictiondata["rain"]["base_year"]  
        ratio= float(predictiondata["rain"]["ratio"])
        if base_year =="1" or base_year =="2" or base_year =="3":
            updateRchBaseYear(model_name,predictiondata)
        elif  base_year =="4":
            #接入传感器数据
            updateRchRealData(model_name,predictiondata)
        
    else:
        print("Rch文件无需修改!")
#更新平水年 ä¸°æ°´å¹´ æž¯æ°´å¹´æ•°æ®
def updateRchBaseYear(model_name,predictiondata):
      #丰水年 æž¯æ°´å¹´
        base_year = predictiondata["rain"]["base_year"]
        ratio= float(predictiondata["rain"]["ratio"])
         #数据来源的模型文件夹
        base_ws=""
        start_time = predictiondata["start_time"]
        end_time =  predictiondata["end_time"]
        count = ModelPeriod.get_months_in_range_count(start_time, end_time)
        if count==12:
        base_ws=  base.predictParamModel + base_year
        else:
             base_ws=  base.predictModel60
        
        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
@@ -239,28 +330,107 @@
        updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
        
        for per in range(predict_per):
            
        periods =CalHead.get_model_period(model_name)
        periods_len= len(periods)
        for per in range(periods_len):
            item = baseMdoel.rch.rech.__getitem__(kper = per)
            array2d = item.get_value()
            array2d_len = len(array2d)
            
            for i in range(array2d_len):
                  array_len = len(array2d[i])
                  for j in range(array_len):
                      if str(base.area_array[i][j]) != '-9999':
                          array2d[i][j] =  array2d[i][j] * ratio
                           
            updateMdoel.rch.rech.__setitem__(key = per, value=array2d) 
        rch = flopy.modflow.ModflowRch(updateMdoel,
                                       nrchop=baseMdoel.rch.nrchop,
                                       ipakcb=baseMdoel.rch.ipakcb,
                                       rech=updateMdoel.rch.rech,
                                       irch =baseMdoel.rch.irch)
          
        rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
        rch.write_file(check=False)
        
#接入传感器数据
def updateRchRealData(model_name,predictiondata):
       # å–平水年的rch文件
        base_year = "1"
        ratio= float(predictiondata["rain"]["ratio"])
        if not ratio:
            ratio = 1
        excel_data = OpenExcel.read_jyl_excel(model_name)
        array_data =[]
        if not excel_data:
            print("降水模板数据未获取到!")
            return "降水模板数据未获取到!"
        for i in range(1,len(excel_data)):
            temp =[]
            for j in range(1,len(excel_data[i])):
                data = round(float(excel_data[i][j]),8)
                temp.append(data)
            array_data.append(temp)
        base_ws=""
        start_time = predictiondata["start_time"]
        end_time =  predictiondata["end_time"]
        count = ModelPeriod.get_months_in_range_count(start_time, end_time)
        if count==12:
             base_ws=  base.predictParamModel + base_year
    else:   
        print("Rch文件无需修改!")
             base_ws=  base.predictModel60
        baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
        update_model_ws = base.model_dir + model_name
        updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
        #降雨分区
        area_dicts = base.getAreas()
        periods =CalHead.get_model_period(model_name)
        periods_len= len(periods)
        for per in range(periods_len):
            #16个分区某个周期的 æ•°æ® ä¸‹æ ‡(0,15) å’Œarea_dicts字典降雨分区的key对应
            water_per_data = get_Array2_column(array_data,per)
            item = baseMdoel.rch.rech.__getitem__(kper = per)
            array2d = item.get_value()
            for key in area_dicts:
                tuples= area_dicts[key]
                values = water_per_data[int(key)]
                for i in range(len(tuples)):
                    x = tuples[i][0]
                    y = tuples[i][1]
                    array2d[x][y]= values*ratio
            updateMdoel.rch.rech.__setitem__(key = per, value=array2d)
        rch = flopy.modflow.ModflowRch(updateMdoel,
                                       nrchop=baseMdoel.rch.nrchop,
                                       ipakcb=baseMdoel.rch.ipakcb,
                                       rech=updateMdoel.rch.rech,
                                       irch =baseMdoel.rch.irch)
        rch.write_file(check=False)
        print("降水分区数据更新完毕!")
        return "降水分区数据更新完毕!"
# Extract a single column from a 2-D list as a NumPy array.
def get_Array2_column(array_data,column):
    """Return column *column* of the 2-D sequence *array_data* as a 1-D ndarray."""
    matrix = np.array(array_data)
    return matrix[:, column]
        
def check_rain_param(predictiondata):
    
@@ -287,18 +457,18 @@
     model_ws = base.model_dir + model_name
     ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
                                    exe_name="mf2005", verbose=True,  version="mf2005", check=False)
     #初始水头
     if "initHeader" not in predictdata:
         print("=============has no  initHeader=============")
         return False
     init_header = predictdata["initHeader"]
     
     dir = base.model_dir + init_header + "\\modflow.head"
     head = bf.HeadFile(dir)
     alldata = head.get_alldata()
     
     lens = len(alldata)
     last_index = lens-3
     last_index = len(alldata)-1
     last_array3= alldata[last_index]
     strt = ml.bas6.strt
@@ -317,6 +487,7 @@
          extension="bas6",)
     
     mfBase6.write_file(check=False)
     return True
#修改dis æ–‡ä»¶
@@ -345,3 +516,5 @@
        extension="dis")
    mfDis.write_file(check=False)
WaterXBL.py
New file
@@ -0,0 +1,77 @@
import flopy.utils.binaryfile as bf
import Base as base
import numpy as np
# Specific yield of the aquifer (dimensionless storativity coefficient).
water_u = 0.2
# Study-area extent in square metres: 5652 active cells of 500 m x 500 m.
water_F= float(5652 * 500 *500)
# Number of valid (active) computation cells in the model grid.
water_invalid_cell =5652
# Study-area extent in square kilometres.
# NOTE(review): 680.250 equals 2721 cells x 0.25 km^2, i.e. the plain-area
# cells below rather than the 5652 active cells -- confirm intended.
water_F_KM= 680.250
# Count of plain-area ("pyq") cells flagged 1 in the mask at base.xs_mp_path.
pyq_cells_total =2721
# Groundwater storage change.
def get_grd_storage(model_name,per1,per2):
    """Return the annualized groundwater storage change between two stress periods.

    Implements the formula noted in the original code:
        dW = 100 * (h1 - h2) * mu * F / t
    where h1/h2 are layer-0 heads averaged over the plain-area cells,
    mu is the specific yield ``water_u``, F is ``water_F_KM`` and t is the
    elapsed time in years (12 stress periods per year).

    Parameters
    ----------
    model_name : str
        Model directory name under ``base.model_dir``.  The literal name
        "202001_202212" is redirected to ``base.baseModel2`` (mirrors the
        ``base.not_allowed_model`` special case used elsewhere -- TODO confirm
        they are the same value).
    per1, per2 : int
        Start and end stress-period indices (0-based).  ``per2`` must be
        >= ``per1`` or a ZeroDivisionError/negative span results.

    Returns
    -------
    float
        The storage change dW (sign follows h1 - h2).
    """
    # Plain-area cells: grid positions flagged 1 in the mask file.
    mask = np.loadtxt(base.xs_mp_path, dtype=int)
    pyq_cells = [(i, j)
                 for i in range(len(mask))
                 for j in range(len(mask[i]))
                 if mask[i][j] == 1]

    # Resolve the head-file location; the baseline model lives elsewhere.
    head_path = base.model_dir + model_name + "\\modflow.head"
    if model_name == "202001_202212":
        head_path = base.baseModel2 + "\\modflow.head"
    alldata = bf.HeadFile(head_path).get_alldata()

    # Layer-0 heads at the first time step of per1 and the last of per2.
    # Each stress period stores 3 time steps, hence the *3 indexing.
    z_start = alldata[int(per1+1)*3-3, 0, :, :]
    z_end = alldata[int(per2+1)*3-1, 0, :, :]
    # Clamp non-positive (dry / no-flow sentinel) heads to zero.
    z_start[(z_start <= 0)] = 0
    z_end[(z_end <= 0)] = 0

    # Average head over the plain-area cells only.  The divisor is derived
    # from the mask instead of the previously hard-coded 2721 so the
    # function stays correct if the mask file changes.
    z_start_total = 0.0
    z_end_total = 0.0
    for i, j in pyq_cells:
        z_start_total += z_start[i, j]
        z_end_total += z_end[i, j]
    cell_count = len(pyq_cells)
    z_start_avg = z_start_total / cell_count
    z_end_avg = z_end_total / cell_count

    # dW = 100*(h1-h2)*mu*F/t, t in years.  Raises ZeroDivisionError when
    # per2 + 1 == per1 (zero-length span), matching the original behaviour.
    year = (per2 + 1 - per1) / 12
    storage = 100 * (z_start_avg - z_end_avg) * water_u * water_F_KM / year
    return storage
# Relative balance residual:
#   X = (total recharge Q1 - total discharge Q2) + dW
#   result = X / Q1   (a fraction; multiply by 100 for percent)
def get_grd_relative_equ(Q1, Q2,w):
    """Return the relative water-balance residual ((Q1 - Q2) + w) / Q1."""
    residual = (Q1 - Q2) + w
    return residual / Q1
main.py
New file
@@ -0,0 +1,60 @@
# å¯¼å…¥Flaskç±»
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS
import pymssql
# Flask takes __name__ so it can locate resources relative to this package.
app = Flask(__name__)
# Allow cross-origin requests (with credentials) on every route.
CORS(app, supports_credentials=True, resources=r'/*')
# SQL Server connection settings.
# NOTE(review): credentials are hard-coded in source -- move them to
# configuration or environment variables.
server = '192.168.0.123:1433'
user='sa'
password='admin123X'
database ='microseism3'
# Fetch microseism event locations for the given month.
def get_event_location_data(month):
    """Query dbo.event_location_<month> and return the events as dicts.

    Each returned dict has keys "x", "y", "z" (event position) and
    "v" (event energy).  Returns an empty list when the month is not a
    valid integer or when the query fails.
    """
    # `month` comes straight from the HTTP request and is concatenated into
    # the table name; a table name cannot be parameterized, so coerce the
    # value to an integer first to prevent SQL injection.
    try:
        month_num = int(month)
    except (TypeError, ValueError):
        print("Error occurred:", "invalid month value: %r" % (month,))
        return []
    conn = pymssql.connect(server=server, user=user, password=password, database=database,as_dict=True)
    cursor = conn.cursor()
    res =[]
    try:
        sqlStr = 'SELECT * FROM dbo.event_location_'+ str(month_num)
        # Execute the query and collect every row.
        cursor.execute(sqlStr)
        result = cursor.fetchall()
        for row in result:
           dic={"x":row["Event_X"],"y":row["Event_Y"],"z":row["Event_Z"],"v":row["Event_Energy"]}
           res.append(dic)
    except Exception as e:
        # Best-effort endpoint: log and return an empty result on failure.
        print("Error occurred:", str(e))
        return []
    finally:
        # Always release the cursor and connection.
        cursor.close()
        conn.close()
    return res
# Endpoint: look up event-location data for the month passed as ?month=...
@app.route('/get_event_location_data', methods=['GET'])
def event_location_data():
    """Return the requested month's event locations as a JSON array."""
    month = request.args.get('month')
    return jsonify(get_event_location_data(month))
if __name__ == '__main__':
    #app.run()    # host IP, port and debug mode can be specified on run()
    app.run(host="192.168.0.107", port=8080)
test.py
File was deleted