Fin. LW, except for computing p of query

This commit is contained in:
BuildTools 2021-09-15 03:49:15 -05:00
parent ed5514b3d8
commit 976470f6c4

165
main.py
View File

@ -5,12 +5,15 @@
import json
import random
from collections import defaultdict
def main():
    """Entry point: load the Bayes net, run likelihood weighting, dump results."""
    net = import_bayes()
    print(len(net))
    print(net["0"]["prob"][0][1])
    #print(is_root("3", net))
    # Observed evidence: nodes "0" and "4" fixed to True.
    evidence = {"0" : True, "4" : True}
    weights = likelihood_weighting(1, evidence, net, 10)
    print()
    for key, value in weights.items():
        print(key, ' : ', value)
#Import the BN from the json
def import_bayes():
@ -19,65 +22,145 @@ def import_bayes():
json_file.close()
return bayes_json
#Checks if node has parents
def is_root(node, BN):
    """Return True when *node* has an empty parent list in the network *BN*."""
    parents = BN[node]["parents"]
    return parents == []
#Return a list of the root nodes
def get_root(BN):
    """Return the ids (as strings) of all root nodes — nodes with no parents.

    Assumes nodes are keyed "0" .. str(len(BN) - 1), as the rest of the
    module does.  (The stale pre-commit variant that took an undefined
    `node` parameter is removed.)
    """
    roots = []
    for i in range(len(BN)):
        node = str(i)
        if BN[node]["parents"] == []:
            roots.append(node)
    return roots
#Get parents of a node
def get_parents(node, BN):
    """Fetch the parent list for *node* (int or str id) from the network."""
    key = str(node)
    entry = BN[key]
    return entry["parents"]
#print(bayes_json["x"]): prints the information about node x (an int)
#print(bayes_json["x"]["parents"]) prints the information about node x's parents
#class BayesianNetwork:
"""NOTES"""
#(bayes_json["x"]): the information about node x (an int)
#(bayes_json["x"]["parents"]) the information about node x's parents
#bayes_json["x"]["prob"][0][0] returns the first set of truth table (0, 0), where [1][0] is the second (0,1)
#bayes_json["x"]["prob"][parent][1] returns probability of the set evidence variable
#E is a dict in the form of {"Node" : Value}
#Compute the estimate of P(X|e), where X is the query variable, and e is the observed value for variables E
def likelihood_weighting(X, e, bayes_net, num_samples):
    """
    Estimate P(X | e) by likelihood weighting.

    X          -- the query variable (currently unused; see commit note)
    e          -- evidence dict {"node_id" : bool}
    bayes_net  -- the network dict loaded by import_bayes()
    num_samples-- number of weighted samples to draw

    Returns W, a dict mapping a running index to {weight : sample_dict},
    where identical samples have their weights summed.

    (This body reconstructs the post-commit version of the function; the
    diff residue of the removed weighted_sample/get_bool helpers is dropped.)
    """
    W = {}
    for _ in range(num_samples):
        w = 1
        #Holds all the info on the samples. EX: ~b, ~e, a, ~j, m
        samples = {}
        #Get all the roots once to save traversal costs
        root_nodes = get_root(bayes_net)
        #Go through all the roots to get probabilities
        for root in root_nodes:
            #If the root is an evidence variable
            if root in e and root not in samples:
                #Just set the value to the observed value
                samples[root] = e[root]
                #Adjust the weight accordingly.
                # NOTE(review): this always reads prob[0][1] regardless of the
                # observed value; presumably it should pick the table entry
                # matching e[root] — confirm (commit message says p-of-query
                # is unfinished).
                w = w * bayes_net[root]["prob"][0][1]
            #Otherwise, sample randomly from the root's prior
            elif root not in samples:
                rand_prob = random.random()
                if rand_prob >= bayes_net[root]["prob"][0][1]:
                    samples[root] = True
                else:
                    samples[root] = False
        #Now go through the BN for non-root nodes
        for node in bayes_net:
            if node not in samples:
                #Get the probability, updated sample dict, and weight
                samples, prob, w = get_probability(str(node), samples, e, bayes_net, w)
        #We now need to write to W.
        #If this sample is already in W, don't add a new entry - only adjust the weight
        written = False
        for idx in range(len(W)):
            #If sample is in W
            if samples in W[idx].values():
                #Pull the weight that's associated with the sample
                key = list(W[idx].items())[0][0]
                #Add the new weight to the existing weight and store it back
                W[idx] = {key + w : samples}
                #Make note that we've already written to W in this loop
                written = True
        #If the sample wasn't already in W, put it in there now
        if not written:
            W[len(W)] = {w : samples}
    return W
#Return the probability of a node and the value dict, given the current evidence and fixed evidence
#Uses recursion to pull probabilities and values down through the network
def get_probability(node, samples, ev, bayes_net, w):
    """
    Return (samples, prob, w) for *node*, resolving any missing ancestor
    values first via recursion.

    node      -- node id (str)
    samples   -- dict of node values drawn so far (mutated and returned)
    ev        -- fixed evidence dict
    bayes_net -- the network
    w         -- running likelihood weight
    """
    parents = get_parents(node, bayes_net)
    for parent in parents:
        #If we already know the value of the parent, no need to reobtain
        if str(parent) in samples:
            continue
        #If we don't know the value, then we need to get it
        else:
            gparents = get_parents(parent, bayes_net)
            #If we have all of parent's parents, then we can just get the probability
            if all(eles in samples for eles in gparents):
                samples, prob, w = translate_ev(gparents, parent, ev, samples, bayes_net, w)
            #Otherwise, we need to get values for the ancestor nodes - use recursion
            else:
                for gparent in gparents:
                    if gparent not in samples:
                        # BUG FIX: capture the recursion's results. Previously
                        # the return value was discarded, so the weight
                        # accumulated while sampling ancestors was lost
                        # (samples only survived via in-place mutation).
                        samples, _, w = get_probability(gparent, samples, ev, bayes_net, w)
                samples, prob, w = translate_ev(gparents, parent, ev, samples, bayes_net, w)
    #Now that we have all the parents' values, we can get the node value,
    #probability, and update samples
    samples, prob, w = translate_ev(parents, node, ev, samples, bayes_net, w)
    return samples, prob, w
#Given a node and its parents, determine the node's value and it's probability
def translate_ev(parents, node, ev, samples, bayes_net, w):
    """
    Sample *node*'s boolean value from its CPT row matching the parents'
    current values; update the weight when the node is evidence.

    Returns (samples, table_prob, w).

    NOTE(review): when node is in ev, the sampled value below still
    overwrites samples[node] — looks unintended for likelihood weighting;
    confirm against the algorithm spec.
    """
    # BUG FIX: sort a COPY. Callers pass bayes_net[...]["parents"] directly,
    # so the original in-place parents.sort() silently mutated the network.
    ordered = sorted(parents)
    node = str(node)
    #Collect the parents' already-sampled values, in ascending parent order
    value_list = []
    for parent in ordered:
        value_list.append(samples[str(parent)])
    #See if this is an evidence node
    if node in ev:
        samples[node] = ev[node]
        get_weight = True
    else:
        get_weight = False
    #The truth table has 2^parents entries
    for i in range(2**len(ordered)):
        #If the truth table row matches the value combination we have
        if bayes_net[node]["prob"][i][0] == value_list:
            #Sample randomly
            rand_prob = random.random()
            table_prob = bayes_net[node]["prob"][i][1]
            if rand_prob >= table_prob:
                samples[node] = True
            else:
                samples[node] = False
                table_prob = 1 - table_prob
            if get_weight:
                w = w * table_prob
    # NOTE(review): if no CPT row matched, table_prob is unbound here and
    # this raises UnboundLocalError — confirm the table always covers
    # every parent-value combination.
    return samples, table_prob, w
# for
def gibbs_sampling():
    """Placeholder for a future Gibbs-sampling implementation."""
    message = "Hello"
    print(message)