gradient函数

Posted pencil2001

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了gradient函数相关的知识,希望对你有一定的参考价值。

import numpy as np
import numpy.core.numeric as _nx


def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.

    Uses 2nd-order accurate central differences in the interior and either
    1st- or 2nd-order accurate one-sided differences at the boundaries, so
    the result has the same shape as the input along each differentiated
    axis.

    Parameters
    ----------
    f : array_like
        An N-dimensional array of sample values.
    varargs : scalar or 1-D array_like, optional
        Spacing between samples. Either nothing (unit spacing everywhere),
        a single scalar applied to all axes, or one scalar / 1-D coordinate
        array per axis in ``axis`` order. A 1-D array gives the coordinate
        of each sample along that axis and must match its length.
    axis : None, int, or tuple of ints, optional (keyword only)
        Axis or axes along which to differentiate. Default: all axes.
    edge_order : {1, 2}, optional (keyword only)
        Accuracy order of the one-sided differences at the boundaries.
        Default: 1.

    Returns
    -------
    gradient : ndarray or list of ndarray
        One array (same shape as `f`) per requested axis; a single array
        when only one axis is differentiated.

    Raises
    ------
    TypeError
        If the number of spacing arguments is invalid, or an unknown
        keyword argument is passed.
    ValueError
        If a distances argument is neither scalar nor 1-D, a 1-D distances
        array does not match the axis length, ``edge_order > 2``, or an
        axis is shorter than ``edge_order + 1``.
    """
    f = np.asanyarray(f)
    N = f.ndim  # number of dimensions

    # normalize_axis_tuple moved out of numpy.core in NumPy 2.0;
    # fall back to the old location for older versions.
    try:
        from numpy.lib.array_utils import normalize_axis_tuple
    except ImportError:  # NumPy < 2.0
        from numpy.core.numeric import normalize_axis_tuple

    axes = kwargs.pop('axis', None)
    if axes is None:
        # no axis given: differentiate along every axis
        axes = tuple(range(N))
    else:
        # convert to a tuple of non-negative ints, e.g. (-2, -1) -> (0, 1)
        axes = normalize_axis_tuple(axes, N)

    len_axes = len(axes)
    n = len(varargs)
    if n == 0:
        # no spacing argument - use 1 in all axes
        dx = [1.0] * len_axes
    elif n == 1 and np.ndim(varargs[0]) == 0:
        # single scalar spacing for all axes
        dx = varargs * len_axes
    elif n == len_axes:
        # scalar or 1d coordinate array for each axis
        dx = list(varargs)
        for i, distances in enumerate(dx):
            if np.ndim(distances) == 0:
                continue
            elif np.ndim(distances) != 1:
                raise ValueError("distances must be either scalars or 1d")
            if len(distances) != f.shape[axes[i]]:
                raise ValueError("when 1d, distances must match "
                                 "the length of the corresponding dimension")
            diffx = np.diff(distances)
            # if distances are constant reduce to the scalar case
            # since it brings a consistent speedup
            if (diffx == diffx[0]).all():
                diffx = diffx[0]
            dx[i] = diffx
    else:
        raise TypeError("invalid number of arguments")

    # boundary scheme: 1st order by default, 2nd order at most
    edge_order = kwargs.pop('edge_order', 1)
    if kwargs:
        raise TypeError('"{}" are not valid keyword arguments.'.format(
            '", "'.join(kwargs.keys())))
    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")

    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.

    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)] * N
    slice2 = [slice(None)] * N
    slice3 = [slice(None)] * N
    slice4 = [slice(None)] * N

    # Choose the output dtype: datetime64 becomes the matching timedelta64
    # (differences of datetimes are timedeltas); timedelta64 and inexact
    # types are kept; everything else is promoted to double.
    otype = f.dtype
    if otype.type is np.datetime64:
        # the timedelta dtype with the same unit information
        otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
        # view as timedelta to allow addition
        f = f.view(otype)
    elif otype.type is np.timedelta64:
        pass
    elif np.issubdtype(otype, np.inexact):
        pass
    else:
        # all other types convert to floating point
        otype = np.double

    # differentiate along each requested axis, in `axes` order, using the
    # per-axis spacing computed above (scalar step or 1-D step array)
    for axis, ax_dx in zip(axes, dx):
        if f.shape[axis] < edge_order + 1:
            # edge_order=1 needs >= 2 samples, edge_order=2 needs >= 3
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least (edge_order + 1) elements are required.")
        # result allocation (uninitialized; every element is written below)
        out = np.empty_like(f, dtype=otype)

        # spacing for the current axis: scalar step -> uniform grid
        uniform_spacing = np.ndim(ax_dx) == 0

        # Numerical differentiation: 2nd order interior
        # slice1 selects the interior, slice2/slice4 select the left/right
        # neighbours of each interior point
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(None, -2)
        slice3[axis] = slice(1, -1)
        slice4[axis] = slice(2, None)

        if uniform_spacing:
            # standard central difference: (f[i+1] - f[i-1]) / (2*h)
            out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)
        else:
            # non-uniform grid: weighted three-point stencil
            dx1 = ax_dx[0:-1]
            dx2 = ax_dx[1:]
            a = -(dx2) / (dx1 * (dx1 + dx2))
            b = (dx2 - dx1) / (dx1 * dx2)
            c = dx1 / (dx2 * (dx1 + dx2))
            # fix the shape for broadcasting along `axis`
            shape = np.ones(N, dtype=int)
            shape[axis] = -1
            a.shape = b.shape = c.shape = shape
            # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

        # Numerical differentiation: 1st order edges
        if edge_order == 1:
            # forward difference at the left edge
            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            dx_0 = ax_dx if uniform_spacing else ax_dx[0]
            # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0

            # backward difference at the right edge
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            dx_n = ax_dx if uniform_spacing else ax_dx[-1]
            # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n

        # Numerical differentiation: 2nd order edges
        else:
            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            if uniform_spacing:
                a = -1.5 / ax_dx
                b = 2. / ax_dx
                c = -0.5 / ax_dx
            else:
                dx1 = ax_dx[0]
                dx2 = ax_dx[1]
                a = -(2. * dx1 + dx2) / (dx1 * (dx1 + dx2))
                b = (dx1 + dx2) / (dx1 * dx2)
                c = - dx1 / (dx2 * (dx1 + dx2))
            # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

            slice1[axis] = -1
            slice2[axis] = -3
            slice3[axis] = -2
            slice4[axis] = -1
            if uniform_spacing:
                a = 0.5 / ax_dx
                b = -2. / ax_dx
                c = 1.5 / ax_dx
            else:
                dx1 = ax_dx[-2]
                dx2 = ax_dx[-1]
                a = (dx2) / (dx1 * (dx1 + dx2))
                b = - (dx2 + dx1) / (dx1 * dx2)
                c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
            # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

        # collect the gradient array computed for this axis
        outvals.append(out)

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)

    # single axis -> bare array; multiple axes -> list of arrays
    if len_axes == 1:
        return outvals[0]
    else:
        return outvals


# Demo: the gradient of [0, 1, 2, 3, 4] with unit spacing is all ones.
arr1 = np.array([0, 1, 2, 3, 4])
print(np.gradient(arr1))

以上是关于gradient函数的主要内容,如果未能解决你的问题,请参考以下文章

gradient函数

R语言ggplot2可视化地图并使用scale_fill_gradient函数自定义设置地图颜色刻度为灰色梯度刻度(grey gradient scales)

Batch Gradient Descendent (BGD) & Stochastic Gradient Descendent (SGD)

随机梯度下降(Stochastic gradient descent)和 批量梯度下降(Batch gradient descent )的公式对比实现对比[转]

Gradient Descent 和 Stochastic Gradient Descent(随机梯度下降法)

c++ 中的 np.gradient 替代方案