# NOTE: removed extraction artifact (concatenated line numbers, not code)
import os
from copy import deepcopy

import numpy as np
-
class Localization:
    """Fingerprint-based localization via k-nearest-neighbour matching.

    A feature vector of antenna voltages is compared against every entry of a
    precomputed fingerprinting table. Table layout (as used by this code):
    columns 0:3 hold the x, y, z position in metres, columns 9:25 hold the 16
    antenna features in the order frame1..frame8, main1..main8 — presumably;
    TODO confirm against the table generator. The position estimate is the
    distance-weighted mean of the k nearest table entries, returned in mm.
    """

    def __init__(self):
        # Multiplied into raw feature vectors to bridge real-world constants
        # (resistance, coil windings, amplification) and the table's units.
        self.scale_factor = 0.003
        # Number of nearest fingerprint entries averaged into one position.
        self.k_nearest = 15
        # Alternative tables kept for reference:
        # self.table = np.load('fingerprinting_tables/Julian_1cm_precision_corrected_antenas.npy')  # corrected table made by Ihm (No ferrite)
        # self.table = np.load('fingerprinting_tables/SmallManualFingerprint_Ferrite.npy')  # manual fingerprinting table (Ferrite) [A bit off]
        # self.table = np.load('fingerprinting_tables/Julian_BThesis_table2_switchedAnt5&6 and 7&8.npy')  # Switched Ant5<->6 and 7<->8 in Excel (No Ferrite) [Does not work!]
        self.table = np.load('fingerprinting_tables/Julian_THIS_ONE_IS_IT.npy')  # 2cm precision, this definitely worked (No ferrite)
        self.normalized_table = deepcopy(self.table)
        self.data = np.load('recorded_data/current_recording.npy')

    def localize(self, fv, exclude_frames=False):
        """Estimate the object position for one sample.

        Input:  - fv [numpy array, 16 values]:
                    Antenna voltages of one sample in the order
                    frame1, frame2, ... frame8, main1, main2, ... main8.
                    (Note: the code compares against 16 table columns, so fv
                    must have 16 entries, not 15 as an older comment said.)
                - exclude_frames [bool]:
                    If True, ignore all main antennas and the strongest frame
                    antenna in both the feature vector and the table.

        self.scale_factor is multiplied with fv to adjust for differences in
        constants between the real world and the measurements (resistance,
        coil windings, amplification factors). self.k_nearest controls how
        many neighbours are averaged into the final position.

        Output: - position [np.array[3]]:
                    Estimated x, y, z position of the object in millimetres.
        """
        feature_vector = fv * self.scale_factor

        if exclude_frames:
            table = deepcopy(self.table)  # copy so the shared table survives
            table[:, 17:25] = 0           # zero all main-antenna columns

            max_idx = np.argmax(abs(feature_vector[:8]))  # strongest frame
            print("Feature_vector BEFORE taking highest frame out:\n", feature_vector)
            print("Highest Frame:", max_idx+1)
            feature_vector[max_idx] = 0
            print("Feature_vector AFTER taking highest frame out:\n", feature_vector)

            table[:, max_idx+9] = 0       # zero the matching frame column too
            print("First row of table", table[0])
        else:
            table = self.table

        # Squared Euclidean distance between the sample and every table entry.
        squared_diff = np.square(table[:, 9:] - feature_vector)
        euclidean_distances = np.sum(squared_diff, 1)
        order = np.argsort(euclidean_distances)

        min_dist = np.sqrt(euclidean_distances[order[0]])
        max_dist = np.sqrt(euclidean_distances[order[self.k_nearest - 1]])
        spread = max_dist - min_dist

        weight_sum = 0.0
        position = np.array([0.0, 0.0, 0.0])
        for idx in order[:self.k_nearest]:
            if spread > 0:
                # Linear weight: nearest entry -> 1, k-th nearest -> 0.
                weight = (max_dist - np.sqrt(euclidean_distances[idx])) / spread
            else:
                # All k distances are identical: fall back to a uniform
                # average (the original code divided by zero here -> NaN).
                weight = 1.0
            weight_sum += weight
            # Positions are in columns 0:3, untouched by the zeroing above.
            position += self.table[idx][:3] * weight
        position /= weight_sum
        return position*1000  # conversion from metre to mm

    def localize_all_samples(self, input_path, output_path):
        """Localize every 10th sample of a saved recording and save the result.

        Loads 'recorded_data/<input_path>.npy', localizes each kept sample and
        writes an (n, 3) array to 'calculated_positions/<output_path>.npy'.
        """
        data = np.load('recorded_data/' + input_path + ".npy")
        # Just user feedback
        print("Start calculating positions from: recorded_data/" + input_path + ".npy")
        print("With: scale_factor=", self.scale_factor, ", k_nearest=", self.k_nearest, ", every 10th sample")

        data = data[::10, :]  # taking only every 10th sample

        # dtype=float: np.float was removed in NumPy 1.24; builtin equivalent.
        positions = np.empty((np.shape(data)[0], 3), dtype=float)
        for i in range(np.shape(data)[0]):
            # Columns 0:3 of a recording appear to be metadata/ground truth;
            # the antenna voltages start at column 3 — TODO confirm.
            positions[i, :] = self.localize(data[i, 3:])
        # Save result
        np.save('calculated_positions/' + output_path, positions)
        print("Saved result in: calculated_positions/" + output_path + ".npy")

    def localize_all_samples_direct(self, data, output_path):
        """Same as localize_all_samples, but takes the data array directly
        (no loading, no subsampling) and also returns the positions."""
        # dtype=float: np.float was removed in NumPy 1.24; builtin equivalent.
        positions = np.empty((np.shape(data)[0], 3), dtype=float)
        for i in range(np.shape(data)[0]):
            positions[i, :] = self.localize(data[i, 3:])
        # Save result
        np.save('calculated_positions/' + output_path, positions)
        return positions

    def localize_averaged_samples(self, input_path, output_path):
        """Average all samples of a recording, then localize the mean sample.

        Saves and prints the single resulting x, y, z position (mm).
        """
        data = np.load('recorded_data/' + input_path + ".npy")
        # Just user feedback
        print("Start calculating positions from: recorded_data/" + input_path + ".npy")
        print("With: scale_factor=", self.scale_factor, ", k_nearest=", self.k_nearest)

        # Average all recorded samples into one mean sample. (The original
        # broadcast the mean into a full-size array first — equivalent.)
        fv = np.mean(data, axis=0)[3:]
        positions = self.localize(fv)  # we get a single position out
        print("Averaged position: x=", positions[0], ", y=", positions[1], ", z=", positions[2])
        # Save result
        np.save('calculated_positions/'+output_path, positions)
        print("Saved result in: calculated_positions/"+output_path+".npy")
# NOTE: removed trailing extraction artifact