Skip to content

Commit

Permalink
refs #1 rao now loops through each model for each particle as it shou…
Browse files Browse the repository at this point in the history
…ld, and values are set to zero when transferring from CA to CV.
  • Loading branch information
Isaac-JenkinsRA committed Mar 10, 2020
1 parent 22b1768 commit 6b6acf6
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 53 deletions.
11 changes: 5 additions & 6 deletions .idea/workspace.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 4 additions & 6 deletions RaoBlackwellised.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,14 +25,14 @@
import os

seed(100)
DRONE_FILE = 23
DRONE_FILE = 24
DATA_DIR = "P:/DASA/EDITTS Drone Tracking/GFI/GPS Tracking"
# DATA_DIR = "C:/Work/Drone_Tracking/EDITTS-Drone-Tracking/data/raw/"
SAVE_DIR = "C:/Work/Drone_Tracking/multi_model_results"
FIXED_WING = {"g2", "g4", "maja", "bixler", "x8", "kahu"}
ROTARY_WING = {"g6", "f550", "drdc"}

NUMBER_OF_PARTICLES = 100
NUMBER_OF_PARTICLES = 250
rw_cv_noise_covariance = 0.1
fw_cv_noise_covariance = 0.5
rw_hover_noise_covariance = 0.5
Expand Down Expand Up @@ -183,12 +183,10 @@ def choose_model():

# print([particle_proportions.count(i) for i in range(len(transition))])

model_probabilities.append([sum([p.model_probabilities[i] for p in prediction.particles]) / NUMBER_OF_PARTICLES
model_probabilities.append([sum([(p.model_probabilities[i] * p.weight)
for p in prediction.particles])
for i in range(len(transition_block))])

# print([sum([p.model_probabilities[i] for p in prediction.particles]) / NUMBER_OF_PARTICLES
# for i in range(len(transition))])

dynamic_model_split.append([particle_proportions.count(i) for i in range(len(transition_block))])

craft_sum = np.cumsum(detection_matrix_split)
Expand Down
3 changes: 2 additions & 1 deletion functions_for_particle_filter.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,15 +176,16 @@ def __init__(self, truth, track, effective_sample_size, dynamic_model_split, mod
self.track = track
self.effective_sample_size = effective_sample_size
self.dynamic_model_split = dynamic_model_split
self.model_probabilities = model_probabilities
self.weighted_sum_per_model = weighted_sum_per_model
self.detection_matrix_split = detection_matrix_split
self.craft_sum = np.cumsum(self.detection_matrix_split)

self.dynamic_model_plot = [[element[j] for element in dynamic_model_split] for j in
range(len(dynamic_model_split[0]))]

self.model_probabilities_plot = [[element[j] for element in model_probabilities] for j in
range(len(model_probabilities[0]))]

self.sum_of_probs = sum([sum(self.craft_prob()[i]) for i in range(len(self.craft_prob()))])

def calculate_sum_weighted_positions(self):
Expand Down
79 changes: 39 additions & 40 deletions stonesoup/updater/particle.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ def update(self, hypothesis, predictor=None, always_resample=True, **kwargs):
for particle in hypothesis.prediction.particles:
particle.weight *= measurement_model.pdf(
hypothesis.measurement.state_vector, particle.state_vector,
**kwargs)
**kwargs) * predictor.transition_matrix[particle.parent.dynamic_model][particle.dynamicmodel]

# Normalise the weights
sum_w = Probability.sum(
Expand Down Expand Up @@ -220,23 +220,21 @@ def update(self, hypothesis, predictor=None, prior_timestamp=None,
time_interval = hypothesis.prediction.timestamp - prior_timestamp

for particle in hypothesis.prediction.particles:
particle.model_probabilities = self.calculate_model_probabilities(

new_model_probabilities = self.calculate_model_probabilities(
particle, predictor.position_mapping, transition, predictor.model_list, time_interval)

for particle in hypothesis.prediction.particles:
predictor.transition_matrix = transition
particle.model_probabilities = new_model_probabilities

prob_y_given_x = measurement_model.pdf(
hypothesis.measurement.state_vector, particle.state_vector,
**kwargs)

"""prob_position_given_previous_position = sum(
self.calculate_model_probabilities(particle, predictor, time_interval))
"""prob_position_given_previous_position = sum(new_model_probabilities)
prob_proposal = self.calculate_model_probabilities(
particle, predictor, time_interval)[particle.dynamic_model] / prob_position_given_previous_position
prob_proposal = new_model_probabilities[particle.dynamic_model] / prob_position_given_previous_position"""

particle.weight *= prob_y_given_x * prob_position_given_previous_position / prob_proposal"""
# particle.weight *= prob_y_given_x * prob_position_given_previous_position / prob_proposal
particle.weight *= prob_y_given_x

# Normalise the weights
Expand Down Expand Up @@ -286,37 +284,38 @@ def calculate_model_probabilities(self, particle, position_mapping,
previous_probabilities = particle.model_probabilities

denominator = []
for i, model in enumerate(model_list):
# if p(m_k|m_k-1) = 0 then p(m_k|x_1:k) = 0
transition_probability = transition_matrix[
particle.parent.dynamic_model][i]
# Getting required states to apply the model to that state vector
parent_required_state_space = particle.parent.state_vector[
np.array(position_mapping[i])]

# The noiseless application of m_k onto x_k-1

mean = model.function(parent_required_state_space,
time_interval=time_interval, noise=False)

# Input the indices that were removed previously
for j in range(len(particle.state_vector)):
if j not in position_mapping[i]:
mean = np.insert(mean, j, particle.state_vector[j])

# Extracting x, y, z from the particle
particle_position = self.measurement_model.matrix() @ particle.state_vector

prob_position_given_model_and_old_position = self.measurement_model.pdf(
particle_position, mean)
# p(m_k-1|x_1:k-1)
prob_previous_iteration_given_model = previous_probabilities[i]

product_of_probs = (prob_position_given_model_and_old_position *
transition_probability *
prob_previous_iteration_given_model)
denominator.append(product_of_probs)

for k in range(len(previous_probabilities)):
for i, model in enumerate(model_list):
# if p(m_k|m_k-1) = 0 then p(m_k|x_1:k) = 0
transition_probability = transition_matrix[
particle.parent.dynamic_model][i]
# Getting required states to apply the model to that state vector
parent_required_state_space = particle.parent.state_vector[
np.array(position_mapping[i])]

# The noiseless application of m_k onto x_k-1

mean = model.function(parent_required_state_space,
time_interval=time_interval, noise=False)

# Input the indices that were removed previously
for j in range(len(particle.state_vector)):
if j not in position_mapping[i]:
mean = np.insert(mean, j, 0)

# Extracting x, y, z from the particle
particle_position = self.measurement_model.matrix() @ particle.state_vector

prob_position_given_model_and_old_position = self.measurement_model.pdf(
particle_position, mean)
# p(m_k-1|x_1:k-1)
prob_previous_iteration_given_model = previous_probabilities[k]

product_of_probs = (prob_position_given_model_and_old_position *
transition_probability *
prob_previous_iteration_given_model)
denominator.append(product_of_probs)
print(denominator)
new_probabilities = []
for i in range(len(previous_probabilities)):
new_probabilities.append(denominator[i] / sum(denominator))
Expand Down

0 comments on commit 6b6acf6

Please sign in to comment.