I'm not a PCA expert, but it looks like I get the same values once I transpose one of the matrices.
>>> import numpy as np
>>> LA = np.linalg
>>> d_train = np.random.randn(100, 10)
>>> d_cov = np.cov(d_train.transpose())  # np.cov expects variables as rows
>>> eigens, mypca = LA.eig(d_cov)  # eigenvectors are the columns of mypca
>>> from sklearn.decomposition import PCA
>>> pca = PCA(n_components=10)
>>> d_fit = pca.fit_transform(d_train)
>>> pc = pca.components_
>>> mypca[0,:]  # first row, not an eigenvector
array([-0.44255435, -0.77430549, -0.14479638, -0.06459874, 0.24772212,
0.20780185, 0.22388151, -0.05069543, -0.14515676, -0.03385801])
>>> pc[0,:]  # sklearn's first component (a row)
array([-0.44255435, -0.24050535, -0.17313927, 0.07182494, 0.09748632,
0.17910516, 0.26125107, 0.71309764, 0.17276004, 0.25095447])
>>> pc.transpose()[0,:]  # first column of pc, compare with mypca[0,:]
array([-0.44255435, 0.77430549, 0.14479638, -0.06459874, 0.24772212,
-0.20780185, 0.22388151, -0.03385801, 0.14515676, 0.05069543])
>>> list(zip(pc.transpose()[:,0], mypca[:,0]))  # first sklearn component vs. first eigenvector
[(-0.44255435328718207, -0.44255435328718096),
(-0.24050535133912765, -0.2405053513391287),
(-0.17313926714559819, -0.17313926714559785),
(0.07182494253930383, 0.0718249425393035),
(0.09748631534772645, 0.09748631534772684),
(0.17910516453826955, 0.17910516453826758),
(0.2612510722861703, 0.2612510722861689),
(0.7130976419217306, 0.7130976419217326),
(0.17276004381786172, 0.17276004381786136),
(0.25095447415020183, 0.2509544741502009)]
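
So sklearn stores the eigenvectors as the rows of pca.components_, while np.linalg.eig returns them as the columns of its second result; transposing one matrix lines them up, apart from sign flips and ordering. Below is a self-contained sketch of that check (my own addition, not from the session above): it sorts the eig output by descending eigenvalue, since eig returns eigenvalues in no particular order while sklearn sorts components by explained variance, and it compares absolute values because each eigenvector is only determined up to sign.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
d_train = rng.standard_normal((100, 10))

# Eigendecomposition of the covariance matrix; eigenvectors are columns.
# (np.linalg.eigh would also work and is the safer choice for symmetric matrices.)
eigens, vecs = np.linalg.eig(np.cov(d_train.T))

# Sort eigenpairs by descending eigenvalue to match sklearn's ordering.
order = np.argsort(eigens)[::-1]
vecs = vecs[:, order]

pc = PCA(n_components=10).fit(d_train).components_

# Rows of pc should equal columns of vecs up to sign.
print(np.allclose(np.abs(pc), np.abs(vecs.T)))  # expected: True

With the eigenvalues sorted and signs ignored, the two decompositions agree to numerical precision.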