I need to get this code down to 4 milliseconds:
import numpy as np

def return_call(data):
    num = data.shape[0] // 4096
    buff_spectrum = np.empty(2048, dtype=np.uint64)
    buff_detect = np.empty(2048, dtype=np.uint64)
    # np.log10 returns floats; the original np.uint64 output buffers
    # silently truncated the results, so use np.double here (as the
    # Cython version below already does)
    end_spetrum = np.empty(num * 1024, dtype=np.double)
    end_detect = np.empty(num * 1024, dtype=np.double)
    _data = np.reshape(data, (num, 4096))
    for _raw_data_spec in _data:
        raw_data_spec = np.reshape(_raw_data_spec, (2048, 2))
        for i in range(2048):
            buff_spectrum[i] = (np.int16(raw_data_spec[i][0]) << 17) | (np.int16(raw_data_spec[i][1] << 1)) >> 1
            buff_detect[i] = np.int16(raw_data_spec[i][0]) >> 15
        # the original pair of loops over 511..0 and 1023..512 ran the same
        # body over complementary halves, so they are merged into one pass
        for i in range(1024):
            if buff_spectrum[i + 1024] != 0:
                end_spetrum[i] = np.log10(buff_spectrum[i + 1024])
                end_detect[i] = buff_detect[i + 1024]
            else:
                end_spetrum[i] = 0
                end_detect[i] = 0
    return end_spetrum, end_detect
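For scale: the two inner loops make several thousand NumPy scalar calls per 4096-sample chunk, and that interpreter overhead, not the arithmetic, dominates the runtime. Below is a minimal pure-NumPy sketch that vectorizes them. Two assumptions worth flagging: the array expressions are meant to reproduce the scalar bit arithmetic above (verify against the real bit layout), and each chunk is meant to fill its own 1024-element output slice, since the original loops always write indices 0..1023 and so overwrite the same slice for every chunk.

import numpy as np

def return_call_vectorized(data):
    num = data.shape[0] // 4096
    raw = data.reshape(num, 2048, 2)
    hi = raw[:, :, 0]  # first int16 word of each pair
    lo = raw[:, :, 1]  # second int16 word of each pair

    # whole-array version of (int16(hi) << 17) | (int16(lo << 1) >> 1),
    # widened to int64 so the 17-bit shift cannot overflow, then viewed
    # as uint64 to match the original buffer dtype
    packed = ((hi.astype(np.int64) << 17)
              | ((lo << 1) >> 1).astype(np.int64)).astype(np.uint64)
    # 0/1 sign-bit flag, matching the uint16 cast used in the Cython version
    detect = (hi.astype(np.uint16) >> 15).astype(np.float64)

    upper = packed[:, 1024:]  # only bins 1024..2047 are ever read
    mask = upper != 0
    end_spectrum = np.zeros((num, 1024), dtype=np.float64)
    np.log10(upper, out=end_spectrum, where=mask)  # zeros stay where mask is False
    end_detect = np.where(mask, detect[:, 1024:], 0.0)
    return end_spectrum.ravel(), end_detect.ravel()

A rewrite like this removes all per-element Python calls, so it is worth benchmarking before (or instead of) Cython.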
I decided to use Cython for this, but I got no speedup:
import numpy as np
cimport numpy
ctypedef signed short DTYPE_t

cpdef return_call(numpy.ndarray[DTYPE_t, ndim=1] data):
    cdef int i
    cdef int num = data.shape[0] // 4096  # explicit integer division
    cdef numpy.ndarray _data
    cdef numpy.ndarray[unsigned long long, ndim=1] buff_spectrum = np.empty(2048, dtype=np.uint64)
    cdef numpy.ndarray[unsigned long long, ndim=1] buff_detect = np.empty(2048, dtype=np.uint64)
    cdef numpy.ndarray[double, ndim=1] end_spetrum = np.empty(num * 1024, dtype=np.double)
    cdef numpy.ndarray[double, ndim=1] end_detect = np.empty(num * 1024, dtype=np.double)
    _data = np.reshape(data, (num, 4096))
    for _raw_data_spec in _data:
        raw_data_spec = np.reshape(_raw_data_spec, (2048, 2))
        for i in range(2048):
            buff_spectrum[i] = (np.uint16(raw_data_spec[i][0]) << 17) | (np.uint16(raw_data_spec[i][1] << 1)) >> 1
            buff_detect[i] = np.uint16(raw_data_spec[i][0]) >> 15
        # same merge of the two half-range loops as in the pure-Python version
        for i in range(1024):
            if buff_spectrum[i + 1024] != 0:
                end_spetrum[i] = np.log10(buff_spectrum[i + 1024])
                end_detect[i] = buff_detect[i + 1024]
            else:
                end_spetrum[i] = 0
                end_detect[i] = 0
    return end_spetrum, end_detect
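The likely reason this is no faster: the hot loops still call np.uint16(...) and np.log10(...) and index raw_data_spec[i][0] through the Python/NumPy object layer, so the compiled module spends its time in the same C-API calls the interpreter did. For the loops to compile to plain C, the code has to use C types end to end. Here is a sketch with typed memoryviews and libc.math.log10, under the same two assumptions as the NumPy sketch above (bit layout, one output slice per chunk):

# cython: boundscheck=False, wraparound=False
import numpy as np
from libc.math cimport log10

cpdef return_call(short[::1] data):
    cdef Py_ssize_t num = data.shape[0] // 4096
    cdef Py_ssize_t c, i, base
    cdef long long hi, lo, packed
    # np.zeros so the packed == 0 bins can simply be skipped
    cdef double[::1] end_spetrum = np.zeros(num * 1024, dtype=np.double)
    cdef double[::1] end_detect = np.zeros(num * 1024, dtype=np.double)

    for c in range(num):
        base = c * 4096
        for i in range(1024):
            # words 2*(i+1024) and 2*(i+1024)+1 of the chunk: only the
            # upper 1024 bins are ever read by the original code
            hi = data[base + 2 * (i + 1024)]
            lo = data[base + 2 * (i + 1024) + 1]
            packed = (hi << 17) | ((<short>(lo << 1)) >> 1)
            if packed != 0:
                end_spetrum[c * 1024 + i] = log10(<double><unsigned long long>packed)
                end_detect[c * 1024 + i] = (<unsigned short>hi) >> 15
    return np.asarray(end_spetrum), np.asarray(end_detect)

Compiled this way, the per-call time should drop far below the current 80 ms for inputs of this size, though the 4 ms budget needs to be confirmed on the target machine.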
The fastest I have gotten it is 80 milliseconds, but I need it much faster: the data comes from hardware and has to be processed in near real time. What is the reason for the lack of speedup, and is it realistic to reach the target with Cython? I am also attaching the test script:
import time

import numpy as np

import example_original
import example_cython

data = np.empty(8192 * 2, dtype=np.int16)

startpy = time.time()
example_original.return_call(data)
finpy = time.time() - startpy

startcy = time.time()
k, r = example_cython.return_call(data)
fincy = time.time() - startcy

print(fincy, finpy)
print('Cython is {}x faster'.format(finpy / fincy))
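One caveat on the measurement itself: a single time.time() sample around a millisecond-scale call mostly measures timer resolution plus first-call warm-up. A sketch of a steadier benchmark using time.perf_counter, repetition, and nonzero input (all-zero or uninitialized input can skip the log10 branch entirely and time the wrong path):

import time
import numpy as np
import example_cython

# nonzero random samples keep the log10 branch in play
data = np.random.randint(1, 2**15, size=8192 * 2, dtype=np.int16)

example_cython.return_call(data)  # warm-up: first call pays one-time costs

reps = 100
start = time.perf_counter()
for _ in range(reps):
    example_cython.return_call(data)
per_call = (time.perf_counter() - start) / reps
print('{:.3f} ms per call'.format(per_call * 1e3))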