获取openfalcon历史数据

Posted by oracle614

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了获取openfalcon历史数据相关的知识,希望对你有一定的参考价值。

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Compute daily nginx statistics for the user-profile cluster from
# Open-Falcon history data: average QPS, peak QPS, total requests,
# share of requests slower than 10ms, and average response time.

import requests
import time
import json

# Login endpoint: authenticating here returns the "sig" token that the
# history API requires (see get_sig()).
login_url = 'http://192.168.10.2:8080/api/v1/user/login'

# Endpoint serving historical metric data.
history_url = "http://192.168.10.2:8080/api/v1/graph/history"

 

def get_sig(url):
    """Log in to Open-Falcon and return the user's "sig" token.

    Without the sig the history API cannot be queried.

    url: login endpoint URL (POST target).
    Returns: the "sig" string from the login response JSON.

    result_json: raw HTTP response from the login POST
    result: response body decoded from JSON into a dict
    """
    # NOTE(review): credentials are hard-coded placeholders ("xxx");
    # they should be moved to a config file or environment variables.
    user = {
        'name': "xxx",
        "password": "xxx"
    }

    # timeout added so a hung login endpoint cannot block the script
    # forever; 30s matches the timeout used for the history queries.
    result_json = requests.post(url, data=user, timeout=30)
    result = json.loads(result_json.text)
    sig = result['sig']
    return sig

 

class nginx_analysis(object):
    """Fetch nginx metrics from Open-Falcon history and aggregate them.

    NOTE(review): class name kept lowercase for backward compatibility
    with existing callers; PEP 8 would prefer ``NginxAnalysis``.
    """

    def __init__(self, sig, user, url):
        """Store credentials and build the Open-Falcon request headers.

        sig:  session signature returned by the login API (see get_sig()).
        user: Open-Falcon user name.
        url:  history-data endpoint URL.
        """
        self.sig = sig
        self.user = user
        self.url = url
        # The Apitoken header is a JSON-encoded string embedding the
        # user name and sig.
        self.api_token = '{"name":"' + self.user + '", "sig":"' + self.sig + '"}'

        self.falcon_header = {
            "Apitoken": self.api_token,
            "X-Forwarded-For": "127.0.0.1",
            "Content-Type": "application/json",
            "name": self.user,
            "sig": self.sig
        }

    def get_nginx_data(self):
        """POST the history queries and return their parsed responses.

        The metrics were reported to Open-Falcon under different endpoint
        names, so three separate queries are issued:
          payload1: per-host nginx response time
          payload2: per-host requests handled per second
          payload3: per-host share of requests slower than 10ms

        current_time: now (epoch seconds)
        history_time: 24h before now — the query window is the last day

        Returns a list of the three decoded JSON responses, in the order
        [response_time, requests_per_sec, over-10ms-percent] — the order
        get_result() depends on.
        """
        current_time = int(time.time())
        history_time = current_time - 86400  # 24 hours, in seconds

        payload1 = {
            "step": 60,
            "start_time": history_time,
            "hostnames": [
                "hostname1",
                "hostname2"
            ],
            "end_time": current_time,
            "counters": [
                "performance/name=nginx_request_time",
            ],
            "consol_fun": "AVERAGE"
        }

        payload2 = {
            "step": 60,
            "start_time": history_time,
            "hostnames": [
                "hostname1",
                "hostname2"
            ],
            "end_time": current_time,
            "counters": [
                "performance/name=nginx_accepts_handled_requests_sec"
            ],
            "consol_fun": "AVERAGE"
        }

        payload3 = {
            "step": 60,
            "start_time": history_time,
            "hostnames": [
                "hostname3",
                "hostname4"
            ],
            "end_time": current_time,
            "counters": [
                "nginx_request_percent/nginx_request_percent=10ms"
            ],
            "consol_fun": "AVERAGE"
        }

        # Shared keyword arguments for every POST; the per-payload body
        # is filled in inside the loop below.
        params = {
            'url': self.url,
            'headers': self.falcon_header,
            'timeout': 30
        }

        # Queries to send, in the order get_result() expects.
        post_data_list = [payload1, payload2, payload3]

        # Decoded responses, one per query.
        response_data_list = []

        for payload in post_data_list:
            params['data'] = json.dumps(payload)
            result_json = requests.post(**params)
            result = json.loads(result_json.text)
            response_data_list.append(result)

        return response_data_list

    def get_result(self, origin_data_list):
        """Reduce the raw history responses to the daily summary.

        origin_data_list: the three-element list returned by
            get_nginx_data(); each element is a list of per-endpoint
            series dicts with keys "endpoint" and "Values", where
            "Values" is a list of {"value": ...} samples.

        Returns [avg_response_time, request_sum, avg_qps, high_qps,
        avg_over_percent].

        NOTE(review): every per-endpoint average below divides by
        len(values) - 1 even though up to len(values) samples are summed
        — possibly intentional (e.g. the last sample is always null) but
        it looks like an off-by-one; confirm against real data. Also,
        samples whose value is falsy (None *or* 0) are skipped.
        """
        # Per-endpoint average response time.
        nginx_request_time = {}

        # Per-endpoint average requests per second.
        nginx_accepts_handled_requests_sec = {}

        # Per-endpoint average share of requests over 10ms.
        nginx_request_percent = {}

        # ---------------- response time ---------------------------------
        for data in origin_data_list[0]:
            key = data["endpoint"]
            values = data["Values"]
            count = len(values)
            # Sum of this endpoint's response-time samples.
            response_time_sum = 0

            for sample in values:
                if sample['value']:
                    response_time_sum += float(sample['value'])
            # Average response time for this endpoint, to 1 decimal.
            per_sec_time = response_time_sum / (count - 1)
            nginx_request_time[key] = float('%.1f' % per_sec_time)

        # Mean of the per-endpoint averages, to 2 decimals.
        tmp_response_sum = 0
        for key, value in nginx_request_time.items():
            tmp_response_sum += value

        avg_response_time = tmp_response_sum / len(nginx_request_time)
        avg_response_time = float('%.2f' % avg_response_time)

        # ---------------- requests per second ---------------------------
        # Peak sample from each endpoint (summed later for peak QPS).
        high_list = []
        for data in origin_data_list[1]:
            key = data["endpoint"]
            values = data["Values"]
            count = len(values)
            # Sum of this endpoint's QPS samples.
            qps_sum = 0

            # This endpoint's non-null samples, for the peak.
            per_high_list = []
            for sample in values:
                if sample['value']:
                    per_high_list.append(sample['value'])
                    qps_sum += float(sample['value'])
            # Peak sample for this endpoint, and its average QPS.
            high_list.append(max(per_high_list))
            per_qps = qps_sum / (count - 1)
            nginx_accepts_handled_requests_sec[key] = float('%.2f' % per_qps)

        # Mean of the per-endpoint average QPS values.
        tmp_qps_sum = 0
        for key, value in nginx_accepts_handled_requests_sec.items():
            tmp_qps_sum += value

        avg_qps = tmp_qps_sum / len(nginx_accepts_handled_requests_sec)
        # Requests per day across the cluster.
        # NOTE(review): the factor 14 is presumably the cluster host
        # count — confirm, and consider making it a parameter.
        request_sum = int(avg_qps * 14 * 86400)
        avg_qps = int(avg_qps * 14)

        # Peak QPS: sum of every endpoint's single highest sample.
        high_qps = int(sum(high_list))

        # ---------------- share of requests over 10ms --------------------
        for data in origin_data_list[2]:
            key = data["endpoint"]
            values = data["Values"]
            count = len(values)
            # Sum of this endpoint's over-10ms-percentage samples.
            over_percent_sum = 0
            for sample in values:
                if sample['value']:
                    over_percent_sum += float(sample['value'])
            # Average over-10ms share for this endpoint, to 1 decimal.
            per_over_percent = over_percent_sum / (count - 1)
            nginx_request_percent[key] = float('%.1f' % per_over_percent)

        # Mean of the per-endpoint shares, to 2 decimals.
        tmp_over_percent_sum = 0
        for key, value in nginx_request_percent.items():
            tmp_over_percent_sum += value

        avg_over_percent = tmp_over_percent_sum / len(nginx_request_percent)
        avg_over_percent = float('%.2f' % avg_over_percent)

        nginx_data = [avg_response_time, request_sum, avg_qps, high_qps, avg_over_percent]
        return nginx_data

 

def main():
    """Entry point: log in, fetch 24h of nginx history, print the summary."""
    sig = get_sig(login_url)

    # BUG FIX: the original referenced an undefined name ``Nginx_analysis``
    # (NameError) — the class is actually named ``nginx_analysis``.
    analyzer = nginx_analysis(sig, 'xxx', history_url)

    # Fetch the raw history data, then reduce it to the daily summary:
    # [avg_response_time, request_sum, avg_qps, high_qps, avg_over_percent]
    nginx_origin_data = analyzer.get_nginx_data()
    nginx_summary = analyzer.get_result(nginx_origin_data)
    print(nginx_summary)


if __name__ == '__main__':
    main()

以上是关于获取openfalcon历史数据的主要内容,如果未能解决你的问题,请参考以下文章

云原生架构-监控告警从openfalcon到Prometheus

openfalcon源码分析之graph

openfalcon源码分析之agent

OpenFalcon简介

openfalcon源码分析之transfer

openfalcon源码分析之Judge