我会用pytorch https://pytorch.org/
这是您的函数的 PyTorch 版本(并且我删除了 abs,以便返回复数值)
def htransforms(data):
    """Return the analytic signal of *data* via a one-sided spectrum.

    Positive-frequency bins are doubled and negative-frequency bins zeroed,
    so the IFFT yields ``x + 1j*H[x]`` (the analytic signal).

    Parameters
    ----------
    data : array-like of shape (batch, N)
        Real-valued signal(s); transformed along the last axis.

    Returns
    -------
    torch.Tensor
        Complex analytic signal on the CPU, same shape as *data*.
    """
    N = data.shape[-1]
    # Fall back to CPU when CUDA is unavailable (unconditional .cuda()
    # crashed on CPU-only machines); GPU behavior is unchanged.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    transforms = torch.as_tensor(data).clone().to(device)
    # BUG FIX: torch.fft.fft is NOT in-place -- the result must be assigned,
    # otherwise the spectrum edits below operate on raw time-domain samples
    # (this is why the marked steps "didn't work").
    transforms = torch.fft.fft(transforms, axis=-1)
    transforms[:, 1:N//2] *= 2.0       # double the positive frequencies
    transforms[:, N//2 + 1: N] = 0+0j  # zero the negative frequencies
    # BUG FIX: abs() removed (as the surrounding text says) so the complex
    # analytic signal survives -- the demo below plots .real and .imag.
    return torch.fft.ifft(transforms).cpu()
但你的变换实际上与我在维基百科上找到的版本不同:https://en.wikipedia.org/wiki/Hilbert_transform#Discrete_Hilbert_transform
维基百科版本
def htransforms_wikipedia(data):
    """Discrete Hilbert transform along the last axis (Wikipedia convention).

    Multiplies positive-frequency bins by ``-1j`` and negative-frequency
    bins by ``+1j``, zeroing the DC bin (and the Nyquist bin for even N),
    then inverse-transforms.  For real input the result is real (up to
    rounding), so e.g. cos -> sin.

    Parameters
    ----------
    data : array-like of shape (batch, N)

    Returns
    -------
    torch.Tensor
        Complex CPU tensor; the real part is the Hilbert transform.
    """
    N = data.shape[-1]
    # Fall back to CPU when CUDA is unavailable; GPU behavior unchanged.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    transforms = torch.as_tensor(data).clone().to(device)
    transforms = torch.fft.fft(transforms, axis=-1)
    # (N+1)//2 also covers the top positive bin when N is odd
    # (the old 1:N//2 slice missed it).
    transforms[:, 1:(N + 1) // 2] *= -1j   # positive frequencies
    # BUG FIX: negative frequencies start at N//2 + 1; the original slice
    # (N+2)//2 + 1 skipped the first negative-frequency bin, leaking a
    # spurious imaginary component into the output.
    transforms[:, N // 2 + 1: N] *= +1j    # negative frequencies
    transforms[:, 0] = 0                   # DC bin carries no phase shift
    if N % 2 == 0:
        transforms[:, N // 2] = 0          # Nyquist bin must vanish too
    return torch.fft.ifft(transforms).cpu()
# --- Demo: impulse responses of both transforms ------------------------
# A unit impulse in the middle of a 1024-sample signal.
data = torch.zeros((1, 2**10))
data[:, 2**9] = 1
tdata = htransforms(data).data
plt.figure()  # separate figure so the two demos don't overdraw each other
plt.plot(tdata.real.T, '-')
plt.plot(tdata.imag.T, '-')
plt.xlim([500, 525])
plt.legend(['real', 'imaginary'])
plt.title('impulse response of your version')  # typo fixed: "inpulse"

data = torch.zeros((1, 2**10))
data[:, 2**9] = 1
tdata = htransforms_wikipedia(data).data
plt.figure()
plt.plot(tdata.real.T, '-')
plt.plot(tdata.imag.T, '-')
plt.xlim([500, 525])
plt.legend(['real', 'imaginary'])
plt.title('impulse response of Wikipedia version')  # typo fixed: "inpulse"
您的版本的脉冲响应是 1 + 1j * h[k],其中 h[k] 是维基百科版本的脉冲响应。如果您处理的是实数数据,维基百科版本很合适,因为您可以使用 rfft https://pytorch.org/docs/stable/fft.html#torch.fft.rfft 和 irfft https://pytorch.org/docs/stable/fft.html#torch.fft.irfft 得到一个更精简的版本
def real_htransforms_wikipedia(data):
    """Hilbert transform of real-valued *data* along the last axis via rfft.

    Multiplying the one-sided spectrum by ``-1j`` and letting irfft's
    implied Hermitian symmetry supply the conjugate ``+1j`` on negative
    frequencies is equivalent to the full-FFT version, at roughly half
    the work for real input.

    Parameters
    ----------
    data : array-like of shape (..., N), real-valued

    Returns
    -------
    torch.Tensor
        Real CPU tensor, same shape as *data*.
    """
    N = data.shape[-1]
    # Fall back to CPU when CUDA is unavailable; GPU behavior unchanged.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    transforms = torch.as_tensor(data).clone().to(device)
    transforms = -1j * torch.fft.rfft(transforms, axis=-1)
    # BUG FIX: index the LAST axis -- `transforms[0] = 0` wiped the entire
    # first signal of a batch instead of the DC bin of every signal.
    transforms[..., 0] = 0          # DC bin
    if N % 2 == 0:
        transforms[..., -1] = 0     # Nyquist bin must vanish too
    # BUG FIX: pass n=N so odd-length signals round-trip to their original
    # length (irfft otherwise assumes an even output length). .cpu() added
    # for consistency with the other two transforms above.
    return torch.fft.irfft(transforms, n=N, axis=-1).cpu()