在批量统计服务器的历史状态时,去web一个一个查看、计算、统计、汇总效率非常低下;批量添加主机也是同样,下面是统计历史信息的python脚本:
注意:使用 history.get 时,如果查询的时间范围超出了历史数据的保存天数(例如 1 周之前的数据已被清理),history.get 会返回空值,此时可以使用 trend.get 代替。以下脚本在 python3 下运行没有问题。
# -*- coding:utf-8 -*-
import requests
import json
import csv
import time
# Zabbix item keys used for the statistics.
key = "vm.memory.size[available]"        # available memory
mem_total_key = "vm.memory.size[total]"  # total memory
cpu_idle_key = "system.cpu.util[,idle]"  # CPU idle percentage

# Each line of ip.txt holds one Hostname exactly as configured in the
# agent's config file; a mismatch returns empty data and breaks the run.
hostfile = "ip.txt"
# Output file for the collected statistics.
datafile = "data_mem.txt"

# Zabbix API endpoint and reporting window.
url = 'http://你的zabbix的访问IP:端口/zabbix/api_jsonrpc.php'
post_headers = {'Content-Type': 'application/json'}
starttime = "2020-10-28 00:00:00"
stoptime = "2020-11-04 00:00:00"
# Log in to the Zabbix API once and keep the auth token for all later calls.
post_data = {
    "jsonrpc": "2.0",
    "method": "user.login",
    "params": {
        "user": "你的zabbix账号",
        "password": "你的zabbix的密码"
    },
    "id": 1
}
ret = requests.post(url, data=json.dumps(post_data), headers=post_headers)
login_resp = json.loads(ret.text)
if "result" not in login_resp:
    # Original died with an opaque KeyError on bad credentials/URL;
    # surface the JSON-RPC error object instead.
    raise RuntimeError("Zabbix login failed: %s" % login_resp.get("error"))
auth_key = login_resp["result"]
def timecovert(stringtime):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string (local time) to a Unix timestamp."""
    return int(time.mktime(time.strptime(stringtime, "%Y-%m-%d %H:%M:%S")))
# Convert the configured window bounds to Unix epoch seconds for the API calls.
start = timecovert(starttime)
stop = timecovert(stoptime)
def get_hostid(post_headers, url, auth_key, host):
    """Look up a host via host.get and return the decoded JSON-RPC response.

    Returns the full response dict; the caller digs into ['result'].
    """
    payload = {
        "jsonrpc": "2.0",
        "method": "host.get",
        "params": {
            "host": host,
        },
        "auth": auth_key,
        "id": 1
    }
    resp = requests.post(url, data=json.dumps(payload), headers=post_headers)
    # Original bound the decoded response to a local named `dict`,
    # shadowing the builtin; renamed.
    return json.loads(resp.content)
def get_itemid(post_headers, url, auth_key, host, itemkey):
    """Return the itemid of the item whose key matches `itemkey` on `host`.

    Raises ValueError when nothing matches (the original crashed with an
    opaque IndexError in that case, as the ip.txt comment warns).
    """
    payload = {
        "jsonrpc": "2.0",
        "method": "item.get",
        "params": {
            # `host` must be the Hostname from the agent config file.
            "host": host,
            "search": {
                "key_": itemkey
            },
        },
        "auth": auth_key,
        "id": 1
    }
    resp = requests.post(url, data=json.dumps(payload), headers=post_headers)
    result = json.loads(resp.content).get('result') or []
    if not result:
        raise ValueError("no item matching %r found on host %r" % (itemkey, host))
    return result[0]['itemid']
def get_history_data(post_headers, url, auth_key, history, itemids, start, stop):
    """Fetch raw history values for `itemids` between `start` and `stop`.

    `history` selects the stored value type: 0 = numeric float,
    1 = character, 2 = log, 3 = numeric unsigned (API default), 4 = text.
    If it does not match the item's actual type the API returns an empty
    result list.
    """
    request_body = json.dumps({
        "jsonrpc": "2.0",
        "method": "history.get",
        "params": {
            "history": history,
            "itemids": itemids,
            "output": "extend",
            "time_from": start,
            "time_till": stop,
        },
        "auth": auth_key,
        "id": 1,
    })
    response = requests.post(url, data=request_body, headers=post_headers)
    return json.loads(response.text)
def get_history_data_limit(post_headers, url, auth_key, itemids, start, stop, limit):
    """Fetch up to `limit` history values for `itemids` (unsigned type).

    The time window is deliberately not sent, so a value comes back even
    when `start`/`stop` fall outside the history retention period;
    `start`/`stop` remain in the signature for call-site symmetry.
    """
    post_data = {
        "jsonrpc": "2.0",
        "method": "history.get",
        "params": {
            # 3 = numeric unsigned; a mismatch with the item type yields [].
            "history": 3,
            "itemids": itemids,
            "output": "extend",
            # Original bug: the `limit` argument was ignored and 1 was
            # hard-coded; honor the argument (existing callers pass 1, so
            # behavior is unchanged for them).
            "limit": limit,
        },
        "auth": auth_key,
        "id": 1,
    }
    ret = requests.post(url, data=json.dumps(post_data), headers=post_headers)
    return json.loads(ret.text)
# Walk the host list, compute memory / CPU usage statistics over the window
# and append one line per host/metric to the output file.
# `with` replaces the bare open()/close() pair so the file is closed even
# when a host lookup raises.
with open(datafile, 'w') as fdata, open(hostfile, "r") as f:
    for line in f:
        # One agent Hostname per line.
        host = line.replace('\n', '').strip()

        # Total memory: a single sample is enough — it does not change.
        host_memtotal_itemid = get_itemid(post_headers, url, auth_key, host, mem_total_key)
        data_mem_total = get_history_data_limit(post_headers, url, auth_key,
                                                host_memtotal_itemid, start, stop, 1)
        hostMemTotal = float(data_mem_total['result'][0]['value'])

        # Memory usage: derive used% from total minus available.
        host_itemid = get_itemid(post_headers, url, auth_key, host, key)
        data = get_history_data(post_headers, url, auth_key, 3, host_itemid, start, stop)
        # Original bug: `if data:` was always true (the JSON-RPC response
        # dict is never empty), so an empty 'result' list crashed with
        # ZeroDivisionError; test the result list and skip such hosts.
        if data.get('result'):
            valuelist = [int(i["value"]) for i in data['result']]
            avge = sum(valuelist) / len(valuelist)
            avge_percentage = (hostMemTotal - avge) * 100 / hostMemTotal
            min1 = min(valuelist)  # lowest available memory => usage peak
            max_percentage = (hostMemTotal - min1) * 100 / hostMemTotal
            fdata.write('mem: ' + host + ',内存平均使用率平均值:' + str(round(avge_percentage, 2)) + ',内存使用率峰值:' + str(round(max_percentage, 2)) + '\n')

        # CPU usage: 100 - idle% (float history, type 0).
        host_itemid = get_itemid(post_headers, url, auth_key, host, cpu_idle_key)
        data = get_history_data(post_headers, url, auth_key, 0, host_itemid, start, stop)
        if data.get('result'):
            valuelist = [float(i["value"]) for i in data['result']]
            avge = sum(valuelist) / len(valuelist)
            avge_percentage = 100 - avge
            min1 = min(valuelist)  # lowest idle => CPU usage peak
            max_percentage = 100 - min1
            # Original bug: the CPU line was labelled 'mem: ' (copy/paste).
            fdata.write('cpu: ' + host + ',CPU平均使用率平均值:' + str(round(avge_percentage, 2)) + ',CPU使用率峰值:' + str(round(max_percentage, 2)) + '\n')
# -*- coding:utf-8 -*-
import requests
import json
import csv
import time
# Zabbix item keys used for the statistics.
key = "vm.memory.size[available]"        # available memory
mem_total_key = "vm.memory.size[total]"  # total memory
cpu_idle_key = "system.cpu.util[,idle]"  # CPU idle percentage

# Each line of ip.txt holds one Hostname exactly as configured in the
# agent's config file; a mismatch returns empty data and breaks the run.
hostfile = "ip.txt"
# Output file for the collected statistics.
datafile = "data_mem.txt"

# Zabbix API endpoint and reporting window.
url = 'http://10.0.0.180/zabbix/api_jsonrpc.php'
post_headers = {'Content-Type': 'application/json'}
starttime = "2020-10-01 00:00:00"
stoptime = "2020-10-02 00:00:00"
# Log in to the Zabbix API once and keep the auth token for all later calls.
post_data = {
    "jsonrpc": "2.0",
    "method": "user.login",
    "params": {
        "user": "admin",
        "password": "zabbix"
    },
    "id": 1
}
ret = requests.post(url, data=json.dumps(post_data), headers=post_headers)
login_resp = json.loads(ret.text)
if "result" not in login_resp:
    # Original died with an opaque KeyError on bad credentials/URL;
    # surface the JSON-RPC error object instead.
    raise RuntimeError("Zabbix login failed: %s" % login_resp.get("error"))
auth_key = login_resp["result"]
def timecovert(stringtime):
    """Parse 'YYYY-MM-DD HH:MM:SS' (local time) into an integer Unix timestamp."""
    parsed = time.strptime(stringtime, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))
# Convert the configured window bounds to Unix epoch seconds for the API calls.
start = timecovert(starttime)
stop = timecovert(stoptime)
def get_hostid(post_headers, url, auth_key, host):
    """Look up a host via host.get and return the decoded JSON-RPC response.

    Returns the full response dict; the caller digs into ['result'].
    """
    payload = {
        "jsonrpc": "2.0",
        "method": "host.get",
        "params": {
            "host": host,
        },
        "auth": auth_key,
        "id": 1
    }
    resp = requests.post(url, data=json.dumps(payload), headers=post_headers)
    # Original bound the decoded response to a local named `dict`,
    # shadowing the builtin; renamed.
    return json.loads(resp.content)
def get_itemid(post_headers, url, auth_key, host, itemkey):
    """Return the itemid of the item whose key matches `itemkey` on `host`.

    Raises ValueError when nothing matches (the original crashed with an
    opaque IndexError in that case, as the ip.txt comment warns).
    """
    payload = {
        "jsonrpc": "2.0",
        "method": "item.get",
        "params": {
            # `host` must be the Hostname from the agent config file.
            "host": host,
            "search": {
                "key_": itemkey
            },
        },
        "auth": auth_key,
        "id": 1
    }
    resp = requests.post(url, data=json.dumps(payload), headers=post_headers)
    result = json.loads(resp.content).get('result') or []
    if not result:
        raise ValueError("no item matching %r found on host %r" % (itemkey, host))
    return result[0]['itemid']
def get_history_data(post_headers, url, auth_key, history, itemids, start, stop):
    """Fetch hourly trend aggregates for `itemids` between `start` and `stop`.

    trend.get is used instead of history.get so that data older than the
    history retention period is still available; each returned entry has
    value_min / value_avg / value_max fields.

    `history` stays in the signature for call-site compatibility but is not
    sent: trend.get has no `history` parameter (the value type is implied by
    the item), and newer Zabbix versions reject unknown parameters.
    """
    post_data = {
        "jsonrpc": "2.0",
        "method": "trend.get",
        "params": {
            "itemids": itemids,
            "output": "extend",
            "time_from": start,
            "time_till": stop,
        },
        "auth": auth_key,
        "id": 1,
    }
    ret = requests.post(url, data=json.dumps(post_data), headers=post_headers)
    # Leftover debug print of the full response removed.
    return json.loads(ret.text)
def get_history_data_limit(post_headers, url, auth_key, itemids, start, stop, limit):
    """Fetch up to `limit` trend entries for `itemids`.

    No time window is sent on purpose, so a value comes back even when the
    requested window predates the retention period; `start`/`stop` remain in
    the signature for call-site symmetry.  The bogus `"history": 3` parameter
    was dropped: trend.get does not accept it, and the leftover debug print
    of the response was removed.
    """
    post_data = {
        "jsonrpc": "2.0",
        "method": "trend.get",
        "params": {
            "itemids": itemids,
            "output": "extend",
            # Original bug: the `limit` argument was ignored and 1 was
            # hard-coded; honor the argument (existing callers pass 1, so
            # behavior is unchanged for them).
            "limit": limit,
        },
        "auth": auth_key,
        "id": 1,
    }
    ret = requests.post(url, data=json.dumps(post_data), headers=post_headers)
    return json.loads(ret.text)
# Walk the host list, compute memory / CPU usage statistics from trend data
# and append one line per host/metric to the output file.
# `with` replaces the bare open()/close() pair so the file is closed even
# when a host lookup raises.
with open(datafile, 'w') as fdata, open(hostfile, "r") as f:
    for line in f:
        # One agent Hostname per line.
        host = line.replace('\n', '').strip()

        # Total memory: the latest trend entry's average is used — total
        # memory does not change over time.
        host_memtotal_itemid = get_itemid(post_headers, url, auth_key, host, mem_total_key)
        data_mem_total = get_history_data_limit(post_headers, url, auth_key,
                                                host_memtotal_itemid, start, stop, 1)
        hostMemTotal = float(data_mem_total['result'][0]['value_avg'])

        # Memory usage: derive used% from total minus available.
        host_itemid = get_itemid(post_headers, url, auth_key, host, key)
        data = get_history_data(post_headers, url, auth_key, 3, host_itemid, start, stop)
        # Original bug: `if data:` was always true (the JSON-RPC response
        # dict is never empty), so an empty 'result' list crashed with
        # ZeroDivisionError; test the result list and skip such hosts.
        if data.get('result'):
            avg_values = [int(i["value_avg"]) for i in data['result']]
            min_values = [int(i["value_min"]) for i in data['result']]
            avge = sum(avg_values) / len(avg_values)
            avge_percentage = (hostMemTotal - avge) * 100 / hostMemTotal
            lowest_avail = min(min_values)  # lowest available => usage peak
            max_percentage = (hostMemTotal - lowest_avail) * 100 / hostMemTotal
            fdata.write('mem: ' + host + ',内存平均使用率平均值:' + str(round(avge_percentage, 2)) + ',内存使用率峰值:' + str(round(max_percentage, 2)) + '\n')

        # CPU usage: 100 - idle% (float trend values).
        host_itemid = get_itemid(post_headers, url, auth_key, host, cpu_idle_key)
        data = get_history_data(post_headers, url, auth_key, 0, host_itemid, start, stop)
        if data.get('result'):
            avg_values = [float(i["value_avg"]) for i in data['result']]
            min_values = [float(i["value_min"]) for i in data['result']]
            avge = sum(avg_values) / len(avg_values)
            avge_percentage = 100 - avge
            lowest_idle = min(min_values)  # lowest idle => CPU usage peak
            max_percentage = 100 - lowest_idle
            # Original bug: the CPU line was labelled 'mem: ' (copy/paste).
            fdata.write('cpu: ' + host + ',CPU平均使用率平均值:' + str(round(avge_percentage, 2)) + ',CPU使用率峰值:' + str(round(max_percentage, 2)) + '\n')