12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091 |
- import numpy as np
- import os
-
-
class Localization:
    """k-nearest-neighbour fingerprinting localization.

    Matches a measured feature vector of antenna voltages against a
    pre-recorded fingerprinting table and estimates a 3-D position as a
    distance-weighted average of the k nearest table entries.

    Table row layout: columns 0-2 = x, y, z position; columns 9 onward =
    reference feature vector (columns 3-8 are not read here).
    """

    def __init__(self):
        # Multiplied onto incoming feature vectors to compensate for constant
        # differences between the real world and the table measurements
        # (resistance, coil windings, amplification factors, ...).
        self.scale_factor = 0.003
        # Number of nearest fingerprint entries averaged into the result.
        self.k_nearest = 15
        #self.table = np.load('fingerprinting_tables/Julian_1cm_precision_corrected_antenas.npy') # corrected table made by Ihm (No ferrite)
        #self.table = np.load('fingerprinting_tables/SmallManualFingerprint_Ferrite.npy') # manual fingerprinting table (Ferrite) [A bit off]
        #self.table = np.load('fingerprinting_tables/Julian_BThesis_table2_switchedAnt5&6 and 7&8.npy') # Switched Ant5<->6 and 7<->8 in Excel (No Ferrite) [Does not work!]
        self.table = np.load('fingerprinting_tables/Julian_THIS_ONE_IS_IT.npy')  # 2cm precision, this definitely worked (No ferrite)
        # NOTE(review): loaded but never read by any method in this class —
        # kept for backward compatibility; confirm before removing.
        self.data = np.load('recorded_data/current_recording.npy')

    def localize(self, fv):
        """Estimate the 3-D position for one sample.

        Input:  - fv [numpy array]:
                    A feature vector containing the antenna voltages of one
                    sample in the order: frame1..frame8, main1..main8.
                    It is scaled by self.scale_factor before matching, and
                    self.k_nearest neighbours are averaged into the result.

        Output: - position [np.array of shape (3,)]:
                    The estimated x, y, z position of the object.
        """
        feature_vector = fv * self.scale_factor

        # Squared Euclidean distance of the scaled feature vector to every
        # table entry (reference features live in columns 9+).
        squared_distances = np.sum(np.square(self.table[:, 9:] - feature_vector), axis=1)
        nearest = np.argsort(squared_distances)[:self.k_nearest]

        min_dist = np.sqrt(squared_distances[nearest[0]])
        max_dist = np.sqrt(squared_distances[nearest[-1]])

        # Degenerate case: all k neighbours equally far away (also hit when
        # k_nearest == 1). The weighting below would divide by zero, so fall
        # back to a plain unweighted average of the neighbour positions.
        if max_dist == min_dist:
            return np.mean(self.table[nearest, :3], axis=0)

        position = np.zeros(3)
        weight_sum = 0.0
        for idx in nearest:
            # Linear weight: 1.0 for the closest neighbour, 0.0 for the k-th.
            w = (max_dist - np.sqrt(squared_distances[idx])) / (max_dist - min_dist)
            weight_sum += w
            position += self.table[idx, :3] * w
        return position / weight_sum

    def localize_all_samples(self, input_path, output_path):
        """Localize every 10th sample of a recording and save all positions.

        Input:  - input_path: file stem under 'recorded_data/' (no '.npy').
                - output_path: file stem under 'calculated_positions/'.
        Side effect: writes an (n, 3) position array to
        'calculated_positions/<output_path>.npy'.
        """
        data = np.load('recorded_data/' + input_path + ".npy")
        # Just user feedback
        print("Start calculating positions from: recorded_data/" + input_path + ".npy")
        print("With: scale_factor=", self.scale_factor, ", k_nearest=", self.k_nearest, ", every 10th sample")

        data = data[::10, :]  # taking only every 10th sample

        # np.float was removed in NumPy 1.24; plain float is the equivalent
        # dtype (the original also allocated this array twice).
        positions = np.empty((data.shape[0], 3), dtype=float)
        for i in range(data.shape[0]):
            # Columns 3+ of each row hold the feature vector
            # (assumes the same recording layout as localize_averaged_samples
            # — TODO confirm against the recorder).
            positions[i, :] = self.localize(data[i, 3:])
        # Save result
        np.save('calculated_positions/' + output_path, positions)
        print("Saved result in: calculated_positions/" + output_path + ".npy")

    def localize_averaged_samples(self, input_path, output_path):
        """Average all samples of a recording, then localize the mean once.

        Input:  - input_path: file stem under 'recorded_data/' (no '.npy').
                - output_path: file stem under 'calculated_positions/'.
        Side effect: writes the single (3,) position to
        'calculated_positions/<output_path>.npy'.
        """
        data = np.load('recorded_data/' + input_path + ".npy")
        # Just user feedback
        print("Start calculating positions from: recorded_data/" + input_path + ".npy")
        print("With: scale_factor=", self.scale_factor, ", k_nearest=", self.k_nearest)

        # Average all recorded samples first, then localize the single mean
        # feature vector (columns 3+ of the mean row). The original tiled the
        # mean over the whole array and pre-allocated an unused positions
        # buffer; the result is identical without either.
        fv = np.mean(data, axis=0)[3:]
        positions = self.localize(fv)
        print("Averaged position: x=", positions[0], ", y=", positions[1], ", z=", positions[2])
        # Save result
        np.save('calculated_positions/' + output_path, positions)
        print("Saved result in: calculated_positions/" + output_path + ".npy")
|