{\vspace {\baselineskip }} \contentsline {figure}{\numberline {3.1}{\ignorespaces Path Walking to State 14\relax }}{9}{}% \contentsline {figure}{\numberline {3.2}{\ignorespaces Color Coding a Small Network Based on Violations\relax }}{10}{}% \contentsline {figure}{\numberline {4.1}{\ignorespaces A network without Synchronous Firing generating infeasible states\relax }}{19}{}% \contentsline {figure}{\numberline {4.2}{\ignorespaces Inclusion of Synchronous Firing into GNU Bison, GNU Flex, and the overall program\relax }}{22}{}% \contentsline {figure}{\numberline {4.3}{\ignorespaces Synchronous Firing in the Graph Generation Process\relax }}{24}{}% \contentsline {figure}{\numberline {4.4}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on Runtime\relax }}{27}{}% \contentsline {figure}{\numberline {4.5}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on State Space\relax }}{28}{}% \contentsline {figure}{\numberline {5.1}{\ignorespaces Task Overview of the Attack and Compliance Graph Generation Process\relax }}{31}{}% \contentsline {figure}{\numberline {5.2}{\ignorespaces Node Allocation for each Task\relax }}{33}{}% \contentsline {figure}{\numberline {5.3}{\ignorespaces Data Distribution of Task One\relax }}{35}{}% \contentsline {figure}{\numberline {5.4}{\ignorespaces Communication From Task 1 to Task 2 when the Number of Nodes Allocated is Equal\relax }}{36}{}% \contentsline {figure}{\numberline {5.5}{\ignorespaces Communication From Task 1 to Task 2 when Task 1 Has More Nodes Allocated\relax }}{37}{}% \contentsline {figure}{\numberline {5.6}{\ignorespaces Example of a Not Applicable Exploit for the MPI Tasking Testing\relax }}{41}{}% \contentsline {figure}{\numberline {5.7}{\ignorespaces Speedup and Efficiency of the MPI Tasking Approach for a Varying Number of Compute Nodes with an Increasing Problem Size\relax }}{41}{}% \contentsline {figure}{\numberline {5.8}{\ignorespaces Example Graph Using the MPI 
Subgraphing Approach\relax }}{43}{}% \contentsline {figure}{\numberline {5.9}{\ignorespaces Frontier Merging and Data Distribution Process\relax }}{45}{}% \contentsline {figure}{\numberline {5.10}{\ignorespaces First Iteration Results of MPI Subgraphing in terms of Runtime\relax }}{47}{}% \contentsline {figure}{\numberline {5.11}{\ignorespaces First Iteration Results of MPI Subgraphing in terms of Speedup and Efficiency\relax }}{48}{}% \contentsline {figure}{\numberline {5.12}{\ignorespaces Modified Subgraphing Example Graph with Two New Edges\relax }}{50}{}% \contentsline {figure}{\numberline {5.13}{\ignorespaces Duplicate States Explored vs Actual Number of States for the 1-4 Service Tests\relax }}{51}{}% \contentsline {figure}{\numberline {5.14}{\ignorespaces Speedup and Efficiency of MPI Subgraphing when using a DHT\relax }}{53}{}% \contentsline {figure}{\numberline {5.15}{\ignorespaces Runtime of MPI Subgraphing when using a DHT vs not using a DHT\relax }}{54}{}% \contentsline {figure}{\numberline {6.1}{\ignorespaces Possible Method for Blending MPI and OpenMP for Task 2 of the MPI Tasking Approach\relax }}{57}{}%