#### Using Python v3.x
import numpy as np

def power_iteration(A, num_simulations: int):
    # Start from a random vector: a random start is very unlikely to be the
    # zero vector (for which A @ x = 0) or to be orthogonal to the dominant
    # eigenvector, either of which would prevent convergence.
    u_k = np.random.rand(A.shape[1])

    for _ in range(num_simulations):
        # calculate the matrix-by-vector product A u_k
        un_u_k = np.dot(A, u_k)

        # calculate its norm
        un_u_k_norm = np.linalg.norm(un_u_k)

        # re-normalize the vector
        u_k = un_u_k / un_u_k_norm

    # estimate the dominant eigenvalue with the Rayleigh quotient
    # (A u_k . u_k) / (u_k . u_k)
    ev_u_k = np.dot(np.dot(A, u_k), u_k) / np.dot(u_k, u_k)

    return u_k, ev_u_k
"""
For A = [ 1 3 0]
[ 2 5 1]
[-1 2 3], we compute the largest eigenvalue and eigenvector corresponding to the largest eigenvalue:
"""
u_k, ev_u_k = power_iteration(np.array([[1, 3, 0], [2, 5, 1], [-1, 2, 3]]), 100)
print("Eigenvector corresponding to the largest eigenvalue:", u_k)
print("Largest eigenvalue:", ev_u_k)
Eigenvector corresponding to the largest eigenvalue: [0.44957722 0.82498015 0.34247346]
Largest eigenvalue: 6.505039724619893
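As an optional sanity check (not part of the original listing), the result can be compared against NumPy's dense eigensolver; up to sign, the dominant eigenpair should agree. The variable names below are only illustrative.

# Illustrative sanity-check sketch: compare the power-iteration result
# with the eigenpair returned by NumPy's dense eigensolver.
import numpy as np

A = np.array([[1, 3, 0], [2, 5, 1], [-1, 2, 3]])
eigenvalues, eigenvectors = np.linalg.eig(A)
dominant = np.argmax(np.abs(eigenvalues))    # index of the largest-magnitude eigenvalue
v = eigenvectors[:, dominant]
v = v / np.linalg.norm(v)                    # unit eigenvector; sign may differ from power iteration
print("np.linalg.eig eigenvalue: ", eigenvalues[dominant].real)
print("np.linalg.eig eigenvector:", v.real)

Note that power iteration only approximates the dominant eigenpair, whereas np.linalg.eig computes the full spectrum of the matrix.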