{\vspace {\baselineskip }}
\contentsline {figure}{\numberline {3.1}{\ignorespaces Path Walking to State 14\relax }}{9}{figure.caption.1}%
\contentsline {figure}{\numberline {3.2}{\ignorespaces Color Coding a Small Network Based on Violations\relax }}{10}{figure.caption.2}%
\contentsline {figure}{\numberline {4.1}{\ignorespaces A Network Without Synchronous Firing Generating Infeasible States\relax }}{19}{figure.caption.3}%
\contentsline {figure}{\numberline {4.2}{\ignorespaces Inclusion of Synchronous Firing into GNU Bison, GNU Flex, and the Overall Program\relax }}{22}{figure.caption.4}%
\contentsline {figure}{\numberline {4.3}{\ignorespaces Synchronous Firing in the Graph Generation Process\relax }}{24}{figure.caption.5}%
\contentsline {figure}{\numberline {4.4}{\ignorespaces Synchronous Firing on Runtime\relax }}{27}{figure.caption.6}%
\contentsline {figure}{\numberline {4.5}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on State Space\relax }}{28}{figure.caption.7}%
\contentsline {figure}{\numberline {4.6}{\ignorespaces Speedup Obtained When Using Synchronous Firing\relax }}{28}{figure.caption.8}%
\contentsline {figure}{\numberline {5.1}{\ignorespaces Task Overview of the Attack and Compliance Graph Generation Process\relax }}{31}{figure.caption.11}%
\contentsline {figure}{\numberline {5.2}{\ignorespaces Node Allocation for Each Task\relax }}{33}{figure.caption.12}%
\contentsline {figure}{\numberline {5.3}{\ignorespaces Data Distribution of Task 1\relax }}{35}{figure.caption.13}%
\contentsline {figure}{\numberline {5.4}{\ignorespaces Communication From Task 1 to Task 2 When the Number of Nodes Allocated Is Equal\relax }}{36}{figure.caption.14}%
\contentsline {figure}{\numberline {5.5}{\ignorespaces Task 1 to Task 2 Communication, Case 2\relax }}{37}{figure.caption.15}%
\contentsline {figure}{\numberline {5.6}{\ignorespaces Example of a Not Applicable Exploit for the MPI Tasking Testing\relax }}{41}{figure.caption.17}%
\contentsline {figure}{\numberline {5.7}{\ignorespaces Speedup and Efficiency of the MPI Tasking Approach for a Varying Number of Compute Nodes with an Increasing Problem Size\relax }}{42}{figure.caption.18}%
\contentsline {figure}{\numberline {5.8}{\ignorespaces MPI Tasking Approach Runtime Results\relax }}{43}{figure.caption.19}%
\contentsline {figure}{\numberline {5.9}{\ignorespaces Results for the MPI Tasking Approach in Terms of Speedup\relax }}{43}{figure.caption.20}%
\contentsline {figure}{\numberline {5.10}{\ignorespaces Results for the MPI Tasking Approach in Terms of Efficiency\relax }}{43}{figure.caption.21}%
\contentsline {figure}{\numberline {5.11}{\ignorespaces Example Graph Using the MPI Subgraphing Approach\relax }}{45}{figure.caption.22}%
\contentsline {figure}{\numberline {5.12}{\ignorespaces Frontier Merging and Data Distribution Process\relax }}{46}{figure.caption.23}%
\contentsline {figure}{\numberline {5.13}{\ignorespaces First Iteration Results of MPI Subgraphing in Terms of Runtime\relax }}{49}{figure.caption.25}%
\contentsline {figure}{\numberline {5.14}{\ignorespaces MPI Subgraphing Results for Approach 1\relax }}{50}{figure.caption.26}%
\contentsline {figure}{\numberline {5.15}{\ignorespaces Modified Subgraphing Example Graph with Two New Edges\relax }}{52}{figure.caption.27}%
\contentsline {figure}{\numberline {5.16}{\ignorespaces MPI Subgraphing Duplicate States\relax }}{53}{figure.caption.28}%
\contentsline {figure}{\numberline {5.17}{\ignorespaces Speedup and Efficiency of MPI Subgraphing When Using a DHT\relax }}{55}{figure.caption.29}%
\contentsline {figure}{\numberline {5.18}{\ignorespaces Runtime of MPI Subgraphing When Using a DHT vs. Not Using a DHT\relax }}{56}{figure.caption.30}%
\contentsline {figure}{\numberline {6.1}{\ignorespaces Possible Method for Blending MPI and OpenMP for Task 2 of the MPI Tasking Approach\relax }}{60}{figure.caption.33}%