Chapter 5 Editing

This commit is contained in:
Noah L. Schrick 2022-04-03 19:35:12 -05:00
parent 1cf62c43c1
commit 5d5e2c8f65
12 changed files with 280 additions and 257 deletions

View File

@@ -1,4 +1,5 @@
\relax
\citation{Graphviz}
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 3: }{\bf \uppercase {UTILITY EXTENSIONS TO THE RAGE ATTACK GRAPH GENERATOR}}}{8}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3.1}\bf Path Walking}{8}{}\protected@file@percent }
\newlabel{sec:PW}{{3.1}{8}}
@@ -6,14 +7,19 @@
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{fig:PW}{{3.1}{9}}
\@writefile{toc}{\contentsline {section}{\numberline {3.2}\bf Color Coding}{9}{}\protected@file@percent }
\citation{nichols_2018}
\citation{cook_rage_2018}
\@writefile{lof}{\contentsline {figure}{\numberline {3.2}{\ignorespaces Color Coding a Small Network Based on Violations\relax }}{10}{}\protected@file@percent }
\newlabel{fig:CC}{{3.2}{10}}
\@writefile{toc}{\contentsline {section}{\numberline {3.3}\bf Compound Operators}{10}{}\protected@file@percent }
\newlabel{sec:compops}{{3.3}{10}}
\citation{cook_rage_2018}
\@writefile{toc}{\contentsline {section}{\numberline {3.3}\bf Compound Operators}{11}{}\protected@file@percent }
\newlabel{sec:compops}{{3.3}{11}}
\citation{CVE-2019-10747}
\@writefile{toc}{\contentsline {section}{\numberline {3.4}\bf Relational Operators}{12}{}\protected@file@percent }
\newlabel{sec:relops}{{3.4}{12}}
\citation{nichols_2018}
\citation{cook_rage_2018}
\citation{cook_rage_2018}
\citation{cook_rage_2018}
\citation{li_concurrency_2019}
\citation{li_combining_2019}
@@ -23,12 +29,13 @@
\citation{cook_rage_2018}
\@writefile{toc}{\contentsline {section}{\numberline {3.5}\bf Intermediate Database Storage}{13}{}\protected@file@percent }
\newlabel{sec:db-stor}{{3.5}{13}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.1}\it Memory Constraint Difficulties}{13}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.1}\it Introduction to Intermediate Database Storage}{13}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.2}\it Memory Constraint Difficulties}{13}{}\protected@file@percent }
\citation{zhang_boosting_2017}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.2}\it Maximizing Performance with Intermediate Database Storage}{14}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.3}\it Portability}{16}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.3}\it Maximizing Performance with Intermediate Database Storage}{15}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5.4}\it Portability}{16}{}\protected@file@percent }
\@setckpt{Chapter3}{
\setcounter{page}{17}
\setcounter{page}{18}
\setcounter{equation}{0}
\setcounter{enumi}{4}
\setcounter{enumii}{0}
@@ -39,7 +46,7 @@
\setcounter{part}{0}
\setcounter{chapter}{3}
\setcounter{section}{5}
\setcounter{subsection}{3}
\setcounter{subsection}{4}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}

View File

@@ -1,40 +1,40 @@
\relax
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 4: }{\bf \uppercase {SYNCHRONOUS FIRING}}}{17}{}\protected@file@percent }
\newlabel{ch:Sync-Fire}{{4}{17}}
\@writefile{toc}{\contentsline {section}{\numberline {4.1}\bf Introduction}{17}{}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 4: }{\bf \uppercase {SYNCHRONOUS FIRING}}}{18}{}\protected@file@percent }
\newlabel{ch:Sync-Fire}{{4}{18}}
\@writefile{toc}{\contentsline {section}{\numberline {4.1}\bf Introduction}{18}{}\protected@file@percent }
\citation{louthan_hybrid_2011}
\citation{louthan_hybrid_2011}
\citation{louthan_hybrid_2011}
\citation{louthan_hybrid_2011}
\citation{louthan_hybrid_2011}
\citation{cook_rage_2018}
\@writefile{lof}{\contentsline {figure}{\numberline {4.1}{\ignorespaces A network without Synchronous Firing generating infeasible states\relax }}{18}{}\protected@file@percent }
\newlabel{fig:non-sync_ex}{{4.1}{18}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1.1}\it Related Synchronous Firing Work}{18}{}\protected@file@percent }
\newlabel{sec:sync-lit}{{4.1.1}{18}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.1}{\ignorespaces A network without Synchronous Firing generating infeasible states\relax }}{19}{}\protected@file@percent }
\newlabel{fig:non-sync_ex}{{4.1}{19}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1.1}\it Related Synchronous Firing Work}{19}{}\protected@file@percent }
\newlabel{sec:sync-lit}{{4.1.1}{19}}
\citation{cook_rage_2018}
\citation{louthan_hybrid_2011}
\@writefile{toc}{\contentsline {section}{\numberline {4.2}\bf Necessary Alterations and Additions}{19}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.1}\it GNU Bison and Flex}{19}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.2}\it PostgreSQL}{20}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {4.2}{\ignorespaces Inclusion of Synchronous Firing into GNU Bison, GNU Flex, and the overall program\relax }}{21}{}\protected@file@percent }
\newlabel{fig:bison-flex}{{4.2}{21}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.3}\it Compound Operators}{21}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.4}\it Graph Generation}{21}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4.3}\bf Experimental Networks and Results}{22}{}\protected@file@percent }
\newlabel{sec:test-platform}{{4.3}{22}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3.1}\it Experimental Networks}{22}{}\protected@file@percent }
\newlabel{sec:Sync-Test}{{4.3.1}{22}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.3}{\ignorespaces Synchronous Firing in the Graph Generation Process\relax }}{23}{}\protected@file@percent }
\newlabel{fig:sync-fire}{{4.3}{23}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3.2}\it Results}{24}{}\protected@file@percent }
\newlabel{sec:Sync-Results}{{4.3.2}{24}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.4}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on Runtime\relax }}{26}{}\protected@file@percent }
\newlabel{fig:Sync-RT}{{4.4}{26}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.5}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on State Space\relax }}{27}{}\protected@file@percent }
\newlabel{fig:Sync-State}{{4.5}{27}}
\@writefile{toc}{\contentsline {section}{\numberline {4.2}\bf Necessary Alterations and Additions}{20}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.1}\it GNU Bison and Flex}{20}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.2}\it PostgreSQL}{21}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {4.2}{\ignorespaces Inclusion of Synchronous Firing into GNU Bison, GNU Flex, and the overall program\relax }}{22}{}\protected@file@percent }
\newlabel{fig:bison-flex}{{4.2}{22}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.3}\it Compound Operators}{22}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2.4}\it Graph Generation}{22}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4.3}\bf Experimental Networks and Results}{23}{}\protected@file@percent }
\newlabel{sec:test-platform}{{4.3}{23}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3.1}\it Experimental Networks}{23}{}\protected@file@percent }
\newlabel{sec:Sync-Test}{{4.3.1}{23}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.3}{\ignorespaces Synchronous Firing in the Graph Generation Process\relax }}{24}{}\protected@file@percent }
\newlabel{fig:sync-fire}{{4.3}{24}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3.2}\it Results}{25}{}\protected@file@percent }
\newlabel{sec:Sync-Results}{{4.3.2}{25}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.4}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on Runtime\relax }}{27}{}\protected@file@percent }
\newlabel{fig:Sync-RT}{{4.4}{27}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.5}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on State Space\relax }}{28}{}\protected@file@percent }
\newlabel{fig:Sync-State}{{4.5}{28}}
\@setckpt{Chapter4}{
\setcounter{page}{28}
\setcounter{page}{29}
\setcounter{equation}{0}
\setcounter{enumi}{4}
\setcounter{enumii}{0}

Binary file not shown (image updated; 35 KiB before and after).

View File

@@ -1,80 +1,80 @@
\relax
\citation{pacheco_introduction_2011}
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 5: }{\bf \uppercase {Parallelization Using MESSAGE PASSING INTERFACE}}}{28}{}\protected@file@percent }
\newlabel{ch:MPI}{{5}{28}}
\@writefile{toc}{\contentsline {section}{\numberline {5.1}\bf Introduction to MPI Utilization for Attack and Compliance Graph Generation}{28}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5.2}\bf Necessary Components}{28}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.1}\it Serialization}{28}{}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 5: }{\bf \uppercase {Parallelization Using MESSAGE PASSING INTERFACE}}}{29}{}\protected@file@percent }
\newlabel{ch:MPI}{{5}{29}}
\@writefile{toc}{\contentsline {section}{\numberline {5.1}\bf Introduction to MPI Utilization for Attack and Compliance Graph Generation}{29}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5.2}\bf Necessary Components}{29}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2.1}\it Serialization}{29}{}\protected@file@percent }
\citation{li_concurrency_2019}
\citation{9150145}
\citation{7087377}
\@writefile{toc}{\contentsline {section}{\numberline {5.3}\bf Tasking Approach}{29}{}\protected@file@percent }
\newlabel{sec:Tasking-Approach}{{5.3}{29}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.1}\it Introduction to the Tasking Approach}{29}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.1}{\ignorespaces Task Overview of the Attack and Compliance Graph Generation Process\relax }}{30}{}\protected@file@percent }
\newlabel{fig:tasks}{{5.1}{30}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.2}\it Algorithm Design}{31}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.1}Communication Structure}{31}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.2}{\ignorespaces Node Allocation for each Task\relax }}{32}{}\protected@file@percent }
\newlabel{fig:node-alloc}{{5.2}{32}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.2}Task 0}{33}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.3}Task 1}{33}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.3}{\ignorespaces Data Distribution of Task One\relax }}{34}{}\protected@file@percent }
\newlabel{fig:Task1-Data-Dist}{{5.3}{34}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.4}Task 2}{34}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.5}Task 3}{34}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.4}{\ignorespaces Communication From Task 1 to Task 2 when the Number of Nodes Allocated is Equal\relax }}{35}{}\protected@file@percent }
\newlabel{fig:Task1-Case1}{{5.4}{35}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.5}{\ignorespaces Communication From Task 1 to Task 2 when Task 1 Has More Nodes Allocated\relax }}{36}{}\protected@file@percent }
\newlabel{fig:Task1-Case2}{{5.5}{36}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.6}Task 4 and Task 5}{36}{}\protected@file@percent }
\newlabel{sec:T4T5}{{5.3.2.6}{36}}
\@writefile{lot}{\contentsline {table}{\numberline {5.1}{\ignorespaces MPI Tags for the MPI Tasking Approach\relax }}{37}{}\protected@file@percent }
\newlabel{table:tasking-tag}{{5.1}{37}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.7}MPI Tags}{37}{}\protected@file@percent }
\newlabel{sec:tasking-tag}{{5.3.2.7}{37}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.3}\it Performance Expectations and Use Cases}{37}{}\protected@file@percent }
\newlabel{sec:Task-perf-expec}{{5.3.3}{37}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.4}\it Results}{38}{}\protected@file@percent }
\newlabel{sec:Tasking-Results}{{5.3.4}{38}}
\@writefile{toc}{\contentsline {section}{\numberline {5.3}\bf Tasking Approach}{30}{}\protected@file@percent }
\newlabel{sec:Tasking-Approach}{{5.3}{30}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.1}\it Introduction to the Tasking Approach}{30}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.1}{\ignorespaces Task Overview of the Attack and Compliance Graph Generation Process\relax }}{31}{}\protected@file@percent }
\newlabel{fig:tasks}{{5.1}{31}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.2}\it Algorithm Design}{32}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.1}Communication Structure}{32}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.2}{\ignorespaces Node Allocation for each Task\relax }}{33}{}\protected@file@percent }
\newlabel{fig:node-alloc}{{5.2}{33}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.2}Task 0}{34}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.3}Task 1}{34}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.3}{\ignorespaces Data Distribution of Task One\relax }}{35}{}\protected@file@percent }
\newlabel{fig:Task1-Data-Dist}{{5.3}{35}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.4}Task 2}{35}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.5}Task 3}{35}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.4}{\ignorespaces Communication From Task 1 to Task 2 when the Number of Nodes Allocated is Equal\relax }}{36}{}\protected@file@percent }
\newlabel{fig:Task1-Case1}{{5.4}{36}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.5}{\ignorespaces Communication From Task 1 to Task 2 when Task 1 Has More Nodes Allocated\relax }}{37}{}\protected@file@percent }
\newlabel{fig:Task1-Case2}{{5.5}{37}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.6}Task 4 and Task 5}{37}{}\protected@file@percent }
\newlabel{sec:T4T5}{{5.3.2.6}{37}}
\@writefile{lot}{\contentsline {table}{\numberline {5.1}{\ignorespaces MPI Tags for the MPI Tasking Approach\relax }}{38}{}\protected@file@percent }
\newlabel{table:tasking-tag}{{5.1}{38}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.3.2.7}MPI Tags}{38}{}\protected@file@percent }
\newlabel{sec:tasking-tag}{{5.3.2.7}{38}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.3}\it Performance Expectations and Use Cases}{38}{}\protected@file@percent }
\newlabel{sec:Task-perf-expec}{{5.3.3}{38}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.3.4}\it Results}{39}{}\protected@file@percent }
\newlabel{sec:Tasking-Results}{{5.3.4}{39}}
\citation{li_concurrency_2019}
\@writefile{toc}{\contentsline {section}{\numberline {5.4}\bf Subgraphing Approach}{39}{}\protected@file@percent }
\newlabel{sec:Subgraphing_Approach}{{5.4}{39}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.6}{\ignorespaces Example of a Not Applicable Exploit for the MPI Tasking Testing\relax }}{40}{}\protected@file@percent }
\newlabel{fig:NA-exp}{{5.6}{40}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.7}{\ignorespaces Speedup and Efficiency of the MPI Tasking Approach for a Varying Number of Compute Nodes with an Increasing Problem Size\relax }}{40}{}\protected@file@percent }
\newlabel{fig:Spd-Eff-Task}{{5.7}{40}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.1}\it Introduction to the Subgraphing Approach}{41}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.2}\it Algorithm Design}{41}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.8}{\ignorespaces Example Graph Using the MPI Subgraphing Approach\relax }}{42}{}\protected@file@percent }
\newlabel{fig:subg}{{5.8}{42}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.1}Worker Nodes}{42}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.2}Root Node}{43}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.9}{\ignorespaces Frontier Merging and Data Distribution Process\relax }}{44}{}\protected@file@percent }
\newlabel{fig:front-merg}{{5.9}{44}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.3}Database Node}{44}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.4}MPI Tags}{44}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.3}\it Performance Expectations and Use Cases}{44}{}\protected@file@percent }
\newlabel{sec:perf_expec_subg}{{5.4.3}{44}}
\@writefile{toc}{\contentsline {section}{\numberline {5.4}\bf Subgraphing Approach}{40}{}\protected@file@percent }
\newlabel{sec:Subgraphing_Approach}{{5.4}{40}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.6}{\ignorespaces Example of a Not Applicable Exploit for the MPI Tasking Testing\relax }}{41}{}\protected@file@percent }
\newlabel{fig:NA-exp}{{5.6}{41}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.7}{\ignorespaces Speedup and Efficiency of the MPI Tasking Approach for a Varying Number of Compute Nodes with an Increasing Problem Size\relax }}{41}{}\protected@file@percent }
\newlabel{fig:Spd-Eff-Task}{{5.7}{41}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.1}\it Introduction to the Subgraphing Approach}{42}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.2}\it Algorithm Design}{42}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.8}{\ignorespaces Example Graph Using the MPI Subgraphing Approach\relax }}{43}{}\protected@file@percent }
\newlabel{fig:subg}{{5.8}{43}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.1}Worker Nodes}{43}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.2}Root Node}{44}{}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {5.9}{\ignorespaces Frontier Merging and Data Distribution Process\relax }}{45}{}\protected@file@percent }
\newlabel{fig:front-merg}{{5.9}{45}}
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.3}Database Node}{45}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.2.4}MPI Tags}{45}{}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.3}\it Performance Expectations and Use Cases}{45}{}\protected@file@percent }
\newlabel{sec:perf_expec_subg}{{5.4.3}{45}}
\citation{lawrence_livermore_national_laboratory_mpip_nodate}
\@writefile{lot}{\contentsline {table}{\numberline {5.2}{\ignorespaces MPI Tags for the MPI Subgraphing Approach\relax }}{45}{}\protected@file@percent }
\newlabel{table:subg-tag}{{5.2}{45}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.4}\it Results}{45}{}\protected@file@percent }
\newlabel{sec:Subgraphing-Results}{{5.4.4}{45}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.10}{\ignorespaces First iteration results of MPI Subgraphing in terms of Runtime\relax }}{46}{}\protected@file@percent }
\newlabel{fig:Subg_base}{{5.10}{46}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.11}{\ignorespaces First iteration results of MPI Subgraphing in terms of Speedup and Efficiency\relax }}{47}{}\protected@file@percent }
\newlabel{fig:Subg_SE}{{5.11}{47}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.12}{\ignorespaces Modified Subgraphing Example Graph with Two New Edges\relax }}{49}{}\protected@file@percent }
\newlabel{fig:subg_mod}{{5.12}{49}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.13}{\ignorespaces Duplicate States Explored vs Actual Number of States for the 1-4 Service Tests\relax }}{50}{}\protected@file@percent }
\newlabel{fig:subg_dup}{{5.13}{50}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.14}{\ignorespaces Speedup and Efficiency of MPI Subgraphing when using a DHT\relax }}{52}{}\protected@file@percent }
\newlabel{fig:subg_DHT_Spd}{{5.14}{52}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.15}{\ignorespaces Runtime of MPI Subgraphing when using a DHT vs not using a DHT\relax }}{53}{}\protected@file@percent }
\newlabel{fig:subg_DHT_base}{{5.15}{53}}
\@writefile{lot}{\contentsline {table}{\numberline {5.2}{\ignorespaces MPI Tags for the MPI Subgraphing Approach\relax }}{46}{}\protected@file@percent }
\newlabel{table:subg-tag}{{5.2}{46}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.4.4}\it Results}{46}{}\protected@file@percent }
\newlabel{sec:Subgraphing-Results}{{5.4.4}{46}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.10}{\ignorespaces First iteration results of MPI Subgraphing in terms of Runtime\relax }}{47}{}\protected@file@percent }
\newlabel{fig:Subg_base}{{5.10}{47}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.11}{\ignorespaces First iteration results of MPI Subgraphing in terms of Speedup and Efficiency\relax }}{48}{}\protected@file@percent }
\newlabel{fig:Subg_SE}{{5.11}{48}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.12}{\ignorespaces Modified Subgraphing Example Graph with Two New Edges\relax }}{50}{}\protected@file@percent }
\newlabel{fig:subg_mod}{{5.12}{50}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.13}{\ignorespaces Duplicate States Explored vs Actual Number of States for the 1-4 Service Tests\relax }}{51}{}\protected@file@percent }
\newlabel{fig:subg_dup}{{5.13}{51}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.14}{\ignorespaces Speedup and Efficiency of MPI Subgraphing when using a DHT\relax }}{53}{}\protected@file@percent }
\newlabel{fig:subg_DHT_Spd}{{5.14}{53}}
\@writefile{lof}{\contentsline {figure}{\numberline {5.15}{\ignorespaces Runtime of MPI Subgraphing when using a DHT vs not using a DHT\relax }}{54}{}\protected@file@percent }
\newlabel{fig:subg_DHT_base}{{5.15}{54}}
\@setckpt{Chapter5}{
\setcounter{page}{54}
\setcounter{page}{55}
\setcounter{equation}{0}
\setcounter{enumi}{4}
\setcounter{enumii}{0}

View File

@@ -46,7 +46,7 @@ attack and compliance graph generation.
\end{figure}
\TUsubsection{Algorithm Design}
The design of the tasking approach is to leverage a pipeline structure with the six tasks and MPI nodes. After its completion, each stage of the pipeline will pass the necessary data to the next stage through various MPI messages, where the next stage's nodes will receive the data and execute their tasks. The pipeline is considered fully saturated when each task has a dedicated node solely for executing work for that task. When there are less nodes than tasks, some nodes will process multiple tasks. When there are more nodes than tasks, additional nodes will be assigned to Tasks 1 and 2. Timings were collected in the serial approach for various networks that displayed more time requirements for Tasks 1 and 2, with larger network sizes requiring vastly more time to be taken in Tasks 1 and 2. As a result, additional nodes are assigned to Tasks 1 and 2. Node allocation can be seen in Figure \ref{fig:node-alloc}.
The design of the tasking approach leverages a pipeline structure built from the six tasks and the MPI nodes. Upon completing its work, each stage of the pipeline passes the necessary data to the next stage through MPI messages, and the next stage's nodes receive the data and execute their tasks. The pipeline is considered fully saturated when each task has a dedicated node solely executing work for that task. When there are fewer nodes than tasks, some nodes process multiple tasks. When there are more nodes than tasks, the additional nodes are assigned to Tasks 1 and 2: timings collected from the serial approach on various networks showed that Tasks 1 and 2 require the most time, with larger networks requiring vastly more time in these two tasks. Node allocation can be seen in Figure \ref{fig:node-alloc}. In this figure, ``world.size()" is an integer representing the number of nodes used in the program, and ``num_tasks" is an integer representing the number of tasks in the pipeline. Using a variable for the number of tasks allows modular use of the pipeline: tasks can be added or removed without changing the allocation logic; only the communication between tasks may need to be modified, and the allocation can be adjusted relatively simply to include new tasks.
When determining which tasks should be handled by the root node, two main considerations guided the decision: minimizing communication cost and avoiding unnecessary complexity. In the serial approach, the frontier queue was the primary data structure for the majority of the generation process. Rather than using a distributed queue or passing multiple sub-queues between nodes, the minimum-cost option is to pass states individually. This approach also helps reduce complexity: managing multiple frontier queues would require duplication checks, multiple nodes requesting data from and storing data into the database, and a strategy to maintain proper queue ordering, all of which would also increase the communication cost. As a result, the root node is dedicated to Tasks 0 and 3.
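To make the node-allocation rule described above concrete, the following C++ sketch shows how ``world.size()" and ``num_tasks" could drive the task-to-rank assignment, with surplus ranks alternating between Tasks 1 and 2. The function name assign_ranks_to_tasks and the simple one-rank-per-task base mapping are illustrative assumptions; the actual mapping, including the root node handling Tasks 0 and 3, is the one shown in Figure \ref{fig:node-alloc}.

    #include <map>
    #include <vector>

    // Illustrative sketch only: a simplified allocation, not the thesis implementation.
    std::map<int, std::vector<int>> assign_ranks_to_tasks(int world_size, int num_tasks) {
        std::map<int, std::vector<int>> alloc;
        // One rank per task, wrapping around so a rank serves several tasks
        // whenever there are fewer ranks than tasks.
        for (int task = 0; task < num_tasks; ++task)
            alloc[task].push_back(task % world_size);
        // Surplus ranks go to the two dominant pipeline stages; an odd surplus
        // leaves Task 1 with one more rank than Task 2.
        for (int rank = num_tasks; rank < world_size; ++rank)
            alloc[(rank - num_tasks) % 2 == 0 ? 1 : 2].push_back(rank);
        return alloc;
    }

For example, with eight ranks and six tasks, ranks 6 and 7 would be assigned to Tasks 1 and 2, respectively.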
@@ -58,7 +58,7 @@ For determining which tasks should be handled by the root note, a few considerat
\end{figure}
\TUsubsubsection{Communication Structure}
The underlying communication structure for the tasking approach relies on a pseudo-ring structure. As seen in Figure \ref{fig:node-alloc}, nodes n$_2$, n$_3$, and n$_4$ are derived from the previous task's greatest node rank. To keep the development abstract, a custom send function checks the world size before sending. If the rank of the node that would receive a message is greater than the world size and therefore does not exist, the rank would then be ``looped around" and corrected to fit within the world size constraints. After the rank correction, the MPI Send function was then invoked with the proper node rank.
The underlying communication structure for the tasking approach relies on a pseudo-ring structure. As seen in Figure \ref{fig:node-alloc}, nodes n$_2$, n$_3$, and n$_4$ are derived from the previous task's greatest node rank. To keep the development abstract, a custom send function checks the world size (``world.size()") before sending. If the rank of the node that would receive a message is greater than the world size and therefore does not exist, the rank would then be ``looped around" and corrected to fit within the world size constraints. After the rank correction, the MPI Send function was then invoked with the proper node rank.
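As a rough illustration of that rank correction (a sketch only: ring_send and its argument layout are assumed names, not the thesis implementation), the wrapper might look like the following.

    #include <mpi.h>

    // Hypothetical send wrapper: loop an out-of-range destination rank back into
    // the valid range before handing the message to MPI_Send.
    void ring_send(const void* buf, int count, MPI_Datatype type,
                   int dest, int tag, MPI_Comm comm) {
        int world_size = 0;
        MPI_Comm_size(comm, &world_size);   // plays the role of world.size()
        int corrected = dest % world_size;  // "loop around" a rank that does not exist
        MPI_Send(buf, count, type, corrected, tag, comm);
    }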
\TUsubsubsection{Task 0}
Task 0 is performed by the root node and is a conditional task; it is not guaranteed to execute at every pipeline iteration. Task 0 runs only when the frontier is empty but the database still holds unexplored states. This occurs when there are memory constraints and database storage is performed during execution to offload the demand, as discussed in Section \ref{sec:db-stor}. After Task 0 completes, a state is popped from the frontier and the root node sends it to n$_1$. If the frontier is still empty, the root node sends the finalize signal to all nodes.
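A minimal control-flow sketch of this conditional step follows; the State/Database stand-ins, tag values, and function name are assumptions made for illustration, not the thesis types or the tags of Table \ref{table:tasking-tag}.

    #include <deque>
    #include <vector>
    #include <mpi.h>

    using State = std::vector<int>;                     // placeholder serialized state
    struct Database { std::deque<State> unexplored; };  // stand-in for intermediate storage

    const int TAG_STATE = 1, TAG_FINALIZE = 99;         // assumed tags

    void root_task0_step(std::deque<State>& frontier, Database& db,
                         int n1_rank, int world_size) {
        // Task 0 proper: refill only when the in-memory frontier has drained but
        // the intermediate database still holds unexplored states.
        if (frontier.empty() && !db.unexplored.empty())
            frontier.swap(db.unexplored);

        if (!frontier.empty()) {
            // Pop a state and hand it to n1 so Task 1 can begin the next pipeline pass.
            State s = frontier.front();
            frontier.pop_front();
            MPI_Send(s.data(), static_cast<int>(s.size()), MPI_INT,
                     n1_rank, TAG_STATE, MPI_COMM_WORLD);
        } else {
            // Frontier and database are both exhausted: tell every node to finish.
            int dummy = 0;
            for (int rank = 1; rank < world_size; ++rank)
                MPI_Send(&dummy, 1, MPI_INT, rank, TAG_FINALIZE, MPI_COMM_WORLD);
        }
    }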
@@ -136,7 +136,7 @@ A series of tests were conducted on the platform described at the beginning of S
The results of the Tasking Approach can be seen in Figure \ref{fig:Spd-Eff-Task}. In terms of speedup, when the number of entries in the exploit list is small, the serial approach has better performance. This is expected, since the communication cost exceeds the time required to generate a state, as discussed in Section \ref{sec:Task-perf-expec}. However, as the number of items in the exploit list increases, the Tasking Approach quickly begins to outperform the serial approach. Notably, even when the tasking pipeline is not fully saturated (when fewer compute nodes are assigned than there are tasks), the performance is still approximately equal to that of the serial approach. The other noticeable feature is that the speedup continues to increase as more compute nodes are assigned.
In terms of efficiency, 2 compute nodes offer the greatest value since the speedup using 2 compute nodes is approximately 1.0 as the exploit list size increases. While the 2 compute node option does offer the greatest efficiency, it does not provide any speedup on any of the testing cases conducted. Similarly, testing with 3, 4, and 5 compute nodes were relatively high compared to the ``fully saturated pipeline" testing counterparts, but they also did not provide any speedup on any of the testing cases conducted. The results also demonstrate that an odd number of compute nodes in a fully saturated pipeline has better efficiency that an even number of compute nodes. When referring to Figure \ref{fig:node-alloc}, when there is an odd number number of compute nodes, Task 1 is allocated more nodes than Task 2. In the testing conducted, Task 1 was responsible for iterating through an increased size of the exploit list, so more nodes is advantageous in distributing the workload. However, since many exploits were not applicable, Task 2 had a lower workload where only 6 exploits could be applicable. This will be further elaborated upon in Section \ref{sec:FW}, but it is expected that efficiency will increase for real networks, since nodes in Task 2 will see a realistic workload.
In terms of efficiency, 2 compute nodes offer the greatest value, since the speedup using 2 compute nodes approaches 1.0 as the exploit list size increases. While the 2 compute node option does offer the greatest efficiency, it does not provide a speedup greater than 1.0 in any of the testing cases conducted. The results also demonstrate that an odd number of compute nodes in a fully saturated pipeline has better efficiency than an even number of compute nodes. As shown in Figure \ref{fig:node-alloc}, when there is an odd number of compute nodes, Task 1 is allocated more nodes than Task 2. In the testing conducted, Task 1 was responsible for iterating through the increasingly large exploit list, so additional nodes are advantageous for distributing that workload. However, since many exploits were not applicable, Task 2 had a lower workload, with only 6 exploits that could be applicable. This will be further elaborated upon in Section \ref{sec:FW}, but efficiency is expected to increase for real networks, since the nodes in Task 2 will see a realistic workload.
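For reference, the speedup and efficiency discussed here follow the standard definitions (stated as the conventional formulas, not as a thesis-specific metric):
\[
S(p) = \frac{T_{\mathrm{serial}}}{T_{p}}, \qquad E(p) = \frac{S(p)}{p},
\]
so, under these definitions, the reported speedup of approximately 1.0 with 2 compute nodes corresponds to an efficiency of roughly 0.5.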
\begin{figure}[htp]
\centering
@@ -156,10 +156,10 @@ In terms of efficiency, 2 compute nodes offer the greatest value since the speed
\TUsection{Subgraphing Approach} \label{sec:Subgraphing_Approach}
\TUsubsection{Introduction to the Subgraphing Approach}
As opposed to the Tasking Approach described in Section \ref{sec:Tasking-Approach}, this Section introduces the Subgraphing Approach as a means of reducing runtime by frontier distribution though subgraphing. Section \ref{sec:db-stor} discusses that the frontier is expanded at a rate faster than can be processed. This approach attempts to distribute the frontier by assigning MPI nodes a starting state, and each node will generate a subgraph up to a designated depth-limit, where each node will then return their generated subgraph to a merging process. The author of \cite{li_concurrency_2019} presented an alternative method of frontier processing by utilizing OpenMP on a shared-memory system to assign each thread an individual state to explore that would then pass through a critical section. This approach is intended for a distributed system, and additionally differs in that each node will explore multiple states to form a subgraph, rather than exploring one individual state. This approach was implemented with two versions, and both collected results to draw conclusions in regards to speedup, efficiency, and scalability for attack graph and compliance graph generation.
As opposed to the Tasking Approach described in Section \ref{sec:Tasking-Approach}, this section introduces the Subgraphing Approach as a means of reducing runtime by distributing the frontier through subgraphing. Section \ref{sec:db-stor} discusses how the frontier expands at a rate faster than it can be processed. This approach attempts to distribute the frontier by assigning each MPI node a starting state; each node generates a subgraph up to a designated depth limit and then returns its generated subgraph to a merging process. The authors of \cite{li_concurrency_2019} presented an alternative method of frontier processing by utilizing OpenMP on a shared-memory system to assign each thread an individual state to explore, which would then pass through a critical section. The approach presented here is intended for a distributed system, and additionally differs in that each node explores multiple states to form a subgraph rather than exploring one individual state. This approach was implemented in two versions, and results were collected from both to draw conclusions regarding speedup, efficiency, and scalability for attack graph and compliance graph generation.
\TUsubsection{Algorithm Design}
The design of the subgraphing approach consists of three main components: worker nodes, the root node, and a database node. The original design did not include a database node, but testing through the implementation of the tasking approach discussed in \ref{sec:T4T5} led to the inclusion of a dedicated database node. The overall design is predicated on the root node distributing data to all worker nodes and merging the worker nodes' subgraphs. Figure \ref{fig:subg} displays an example graph that utilizes three worker nodes with a depth limit of 3. Each worker node corresponds to a different graph state color in the figure. Each worker node explored a varying number of states, but did not proceed to explore a depth that exceeded the specified depth limit of 3. The final cluster of four nodes at the bottom of the graph represents one of the three worker node exploring the final states, while the other two nodes wait for further instruction. The following three subsections describe each component in further detail.
The design of the subgraphing approach consists of three main components: worker nodes, the root node, and a database node. The original design did not include a database node, but testing during the implementation of the tasking approach, discussed in Section \ref{sec:T4T5}, led to the inclusion of a dedicated database node. The overall design is predicated on the root node distributing data to all worker nodes and merging the worker nodes' subgraphs. Figure \ref{fig:subg} displays an example graph that utilizes three worker nodes with a depth limit of 3; each worker node corresponds to a different graph state color and texture in the figure. Each worker node explored a varying number of states, but never explored beyond the specified depth limit of 3. The final cluster of four nodes at the bottom of the graph represents one of the three worker nodes exploring the final states, while the other two nodes wait for further instruction. The following three subsections describe each component in further detail.
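To make the worker node's role concrete, the following sketch shows a depth-limited expansion of the kind described above. State, Subgraph, and the expand callback are illustrative stand-ins rather than the thesis's data structures, and no cross-worker duplicate check is performed, which is exactly the behavior that leads to the duplicate work discussed in Section \ref{sec:Subgraphing-Results}.

    #include <deque>
    #include <utility>
    #include <vector>

    struct State { int id; };                                    // placeholder
    struct Edge  { int from, to; };
    struct Subgraph { std::vector<State> states; std::vector<Edge> edges; };

    // expand is any callable returning the successor states of a state (a stand-in
    // for applying the exploit list during generation).
    template <typename ExpandFn>
    Subgraph explore_to_depth(const State& start, int depth_limit, ExpandFn expand) {
        Subgraph sub;
        std::deque<std::pair<State, int>> frontier{{start, 0}};  // (state, depth)
        while (!frontier.empty()) {
            auto [s, depth] = frontier.front();
            frontier.pop_front();
            sub.states.push_back(s);
            if (depth == depth_limit) continue;                  // respect the depth limit
            for (const State& child : expand(s)) {
                sub.edges.push_back({s.id, child.id});
                frontier.emplace_back(child, depth + 1);
            }
        }
        return sub;  // returned to the root for the frontier merging process
    }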
\begin{figure}[htp]
\centering
@@ -215,7 +215,7 @@ Similar to Section \ref{sec:tasking-tag} that discussed the usage of MPI Tags fo
This approach is intended to reduce runtime when the frontier grows at a rate faster than it can be explored. However, since this approach is designed for distributed systems, there is no guarantee that speedup can be achieved when duplicate work is performed. Duplicate work not only wastes the worker nodes' time, but also increases the communication between nodes needed to adequately explore the graph and increases the number of merging calls made by the root node. The ideal scenario for the subgraphing approach is a sparse graph that aligns more with an N-ary tree structure, where each node has only one parent. When the graph is sparse, there is a lower likelihood of duplicate work occurring, since worker nodes have a lower probability of exploring a graph state that connects to a different graph state that has been (or will be) explored by another worker node. If each graph state were able to have only one parent, there would be a lower likelihood that worker nodes would explore the same graph cluster.
\TUsubsection{Results} \label{sec:Subgraphing-Results}
A series of tests were conducted on the platform described at the beginning of Section \ref{sec:test-platform}, and results were collected in regards to the effect of the MPI Subgraphing approach on the 4 example networks discussed in \ref{sec:Sync-Test}. All tests used synchronous firing. The initial results are seen in Figure \ref{fig:Subg_base}, which displays the results in terms of their runtimes. Only the serial runtime from the 2 Service test is displayed for conciseness. The results in terms of speedup and efficiency are seen in Figure \ref{fig:Subg_SE}.
A series of tests was conducted on the platform described at the beginning of Section \ref{sec:test-platform}, and results were collected regarding the effect of the MPI Subgraphing approach on the 4 example networks discussed in Section \ref{sec:Sync-Test}. All tests used synchronous firing. Figure \ref{fig:Subg_base} shows the runtime of each test case; only the serial runtime from the 2 Service test is displayed for conciseness. The results in terms of speedup and efficiency are seen in Figure \ref{fig:Subg_SE}.
\begin{figure}[htp]
\includegraphics[width=\linewidth]{"./Chapter5_img/MPISubg_RT_No_DHT.png"}
@@ -232,7 +232,7 @@ A series of tests were conducted on the platform described at the beginning of S
\label{fig:Subg_SE}
\end{figure}
As noted from the Figures, the performance from this approach appears quite poor. The serial approach has greater performance in all cases, and the resulting speedups for all 4 service tests are below 1.0 when using the subgraphing approach. Likewise, the efficiency continues to worsen as more compute nodes are added to the system. There are a few reasons for this poor performance. Figure \ref{fig:subg} illustrates an example graph that is considered favorable to this approach in that branches are relatively distinct, and the graph is not fully connected. As a result in this example graph, each compute node is working on independent graph structures that do not overlap, and all work is distinct. This graph can quickly lead to issues through a few modifications. Figure \ref{fig:subg_mod} utilizes the same example graph from Figure \ref{fig:subg}, but adds two edges between the outside branches. With this arrangement, the 1st and 3rd compute nodes will perform work that overlaps with the work performed by the 2nd compute node. Both compute node 1 and compute node 3 will explore State 5, and depending on the depth limit, compute nodes 1 and 3 will continue to explore State 5's children, leading to almost all of compute 2's work being duplicated twice. This duplicate work occurs at an alarming rate in the service tests that were performed. Figure \ref{fig:subg_dup} illustrates that there is an extraordinarily large amount of duplicate work occurring in the testing, which substantially increases the runtime of this approach. The duplicate work, as discussed in Section \ref{sec:perf_expec_subg}, not only wastes compute nodes' times and leads to a longer exploration process, but it also requires the root node to perform more merging and cleanup work. When using mpiP (a light-weight MPI profiler provided by Lawrence Livermore National Laboratory) \cite{lawrence_livermore_national_laboratory_mpip_nodate}, it was measured that this extra merging and cleanup work performed by the root causes additional delays in distributing data, and the compute nodes spent a combined 35{\%} total application runtime just waiting to receive more data from the root node in the 1 service test.
As noted from Figures \ref{fig:Subg_base} and \ref{fig:Subg_SE}, the performance of this approach is quite poor. The serial approach performs better in all cases, and the resulting speedups for all 4 service tests are below 1.0 when using the subgraphing approach. Likewise, the efficiency continues to worsen as more compute nodes are added to the system. There are a few reasons for this poor performance. Figure \ref{fig:subg} illustrates an example graph that is considered favorable to this approach, in that the branches are relatively distinct and the graph is not fully connected. As a result, in this example graph each compute node works on independent graph structures that do not overlap, and all work is distinct. A few modifications to this graph can quickly lead to issues. Figure \ref{fig:subg_mod} utilizes the same example graph from Figure \ref{fig:subg}, but adds two edges between the outside branches. With this arrangement, the 1st and 3rd compute nodes will perform work that overlaps with the work performed by the 2nd compute node. Both compute node 1 and compute node 3 will explore State 5, and depending on the depth limit, they will continue to explore State 5's children, leading to almost all of compute node 2's work being duplicated twice. This duplicate work occurs at an alarming rate in the service tests that were performed. Figure \ref{fig:subg_dup} illustrates that an extraordinarily large amount of duplicate work occurs in the testing, which substantially increases the runtime of this approach. The duplicate work, as discussed in Section \ref{sec:perf_expec_subg}, not only wastes the compute nodes' time and leads to a longer exploration process, but also requires the root node to perform more merging and cleanup work. When using mpiP (a lightweight MPI profiler provided by Lawrence Livermore National Laboratory) \cite{lawrence_livermore_national_laboratory_mpip_nodate}, it was measured that this extra merging and cleanup work performed by the root causes additional delays in distributing data; in the 1 service test, the compute nodes spent a combined 35{\%} of the total application runtime just waiting to receive more data from the root node.
\begin{figure}[htp]
\includegraphics[width=\linewidth]{"./Chapter5_img/dup.drawio.png"}
@@ -248,7 +248,7 @@ As noted from the Figures, the performance from this approach appears quite poor
\label{fig:subg_dup}
\end{figure}
To minimize the duplicate work performed, a second approach using a distributed hash table (DHT) was attempted. With a DHT, each compute node would check to ensure that they were not duplicating work. This would limit the work needed by the root node, but each worker node would need to search the DHT. Using a DHT would increase the communication overhead, but if the communication overhead was less than the time taken for duplicate work or was minimal enough to still process the frontier at a greater rate than the serial approach, then the distributed hash table would be considered advantageous. Rather than devising a unique strategy for a distributed hash table, this work made use of the Berkely Container Library (BCL), which is open-source and provides distributed data structures with easy-to-use interfaces. Since BCL is a header-only library, it allowed for minimal code alterations, and primarily just needed to be dropped into the system. Testing was repeated with an identical setup to the approach without a DHT. The results in terms of speedup and efficiency are seen in Figure \ref{fig:subg_DHT_Spd}. Results in terms of runtime between the DHT approach and the base approach are seen in Figure \ref{fig:subg_DHT_base}.
To minimize the duplicate work performed, a second approach using a distributed hash table (DHT) was attempted. With a DHT, each compute node checks to ensure that it is not duplicating work. This limits the work needed by the root node, but each worker node must search the DHT. Using a DHT increases the communication overhead, but if that overhead is less than the time spent on duplicate work, or is small enough that the frontier is still processed at a greater rate than in the serial approach, then the distributed hash table is considered advantageous. Rather than devising a unique distributed hash table strategy, this work made use of the Berkeley Container Library (BCL), an open-source library that provides distributed data structures with easy-to-use interfaces. Since BCL is a header-only library, it allowed for minimal code alterations and primarily just needed to be dropped into the system. Testing was repeated with a setup identical to the approach without a DHT. The results in terms of speedup and efficiency are seen in Figure \ref{fig:subg_DHT_Spd}, and the runtimes of the DHT approach versus the base approach are seen in Figure \ref{fig:subg_DHT_base}.
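The duplicate check itself can be sketched as follows. DuplicateFilter is a local stand-in, backed here by a plain hash set purely so the sketch is self-contained; in the actual implementation this role is played by a BCL distributed container visible to all compute nodes, and claim() is an assumed interface rather than a BCL call.

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Local stand-in for the distributed hash table: claim() returns true only for
    // the first caller to register a given state hash.
    struct DuplicateFilter {
        std::unordered_set<std::uint64_t> seen;
        bool claim(std::uint64_t h) { return seen.insert(h).second; }
    };

    struct State { std::uint64_t hash; };

    // Depth-limited expansion that skips states another worker has already claimed.
    template <typename ExpandFn>
    void explore_with_dht(std::vector<State> frontier, int depth_limit,
                          DuplicateFilter& dht, ExpandFn expand) {
        for (int depth = 0; depth < depth_limit && !frontier.empty(); ++depth) {
            std::vector<State> next;
            for (const State& s : frontier) {
                if (!dht.claim(s.hash))
                    continue;                  // already explored or claimed elsewhere
                for (const State& child : expand(s))
                    next.push_back(child);     // only unclaimed states generate new work
            }
            frontier.swap(next);
        }
    }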
\begin{figure}
\centering

View File

@@ -1,12 +1,12 @@
\relax
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 6: }{\bf \uppercase {CONCLUSIONS AND FUTURE WORKS}}}{54}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {6.1}\bf Conclusions}{54}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {6.2}\bf Future Work}{55}{}\protected@file@percent }
\newlabel{sec:FW}{{6.2}{55}}
\@writefile{lof}{\contentsline {figure}{\numberline {6.1}{\ignorespaces Possible Method for Blending MPI and OpenMP for Task 2 of the MPI Tasking Approach\relax }}{56}{}\protected@file@percent }
\newlabel{fig:OMP_MPI_Blend}{{6.1}{56}}
\@writefile{toc}{\contentsline {chapter}{\numberline {CHAPTER 6: }{\bf \uppercase {CONCLUSIONS AND FUTURE WORKS}}}{55}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {6.1}\bf Conclusions}{55}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {6.2}\bf Future Work}{56}{}\protected@file@percent }
\newlabel{sec:FW}{{6.2}{56}}
\@writefile{lof}{\contentsline {figure}{\numberline {6.1}{\ignorespaces Possible Method for Blending MPI and OpenMP for Task 2 of the MPI Tasking Approach\relax }}{57}{}\protected@file@percent }
\newlabel{fig:OMP_MPI_Blend}{{6.1}{57}}
\@setckpt{Chapter6}{
\setcounter{page}{58}
\setcounter{page}{59}
\setcounter{equation}{0}
\setcounter{enumi}{4}
\setcounter{enumii}{0}

View File

@@ -29,9 +29,9 @@
\bibcite{baloyi_guidelines_2019}{7}
\bibcite{allman_complying_2006}{8}
\@writefile{toc}{{\hfill \ }}
\@writefile{toc}{\contentsline {section}{\hspace {-\parindent }NOMENCLATURE}{58}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\hspace {-\parindent }NOMENCLATURE}{59}{}\protected@file@percent }
\@writefile{toc}{\addvspace {10pt}}
\@writefile{toc}{\contentsline {section}{\hspace {-\parindent }BIBLIOGRAPHY}{58}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\hspace {-\parindent }BIBLIOGRAPHY}{59}{}\protected@file@percent }
\@writefile{toc}{{\hfill \ }}
\bibcite{noauthor_sarbanes-oxley_2002}{9}
\bibcite{noauthor_health_1996}{10}
@@ -52,10 +52,10 @@
\bibcite{li_concurrency_2019}{25}
\bibcite{9150145}{26}
\bibcite{7087377}{27}
\bibcite{li_combining_2019}{28}
\bibcite{CVE-2019-10747}{29}
\bibcite{CVE-2019-10747}{28}
\bibcite{li_combining_2019}{29}
\bibcite{louthan_hybrid_2011}{30}
\bibcite{pacheco_introduction_2011}{31}
\bibcite{lawrence_livermore_national_laboratory_mpip_nodate}{32}
\bibstyle{ieeetr}
\gdef \@abspage@last{72}
\gdef \@abspage@last{73}

View File

@@ -1,24 +1,24 @@
{\vspace {\baselineskip }}
\contentsline {figure}{\numberline {3.1}{\ignorespaces Path Walking to State 14\relax }}{9}{}%
\contentsline {figure}{\numberline {3.2}{\ignorespaces Color Coding a Small Network Based on Violations\relax }}{10}{}%
\contentsline {figure}{\numberline {4.1}{\ignorespaces A network without Synchronous Firing generating infeasible states\relax }}{18}{}%
\contentsline {figure}{\numberline {4.2}{\ignorespaces Inclusion of Synchronous Firing into GNU Bison, GNU Flex, and the overall program\relax }}{21}{}%
\contentsline {figure}{\numberline {4.3}{\ignorespaces Synchronous Firing in the Graph Generation Process\relax }}{23}{}%
\contentsline {figure}{\numberline {4.4}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on Runtime\relax }}{26}{}%
\contentsline {figure}{\numberline {4.5}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on State Space\relax }}{27}{}%
\contentsline {figure}{\numberline {5.1}{\ignorespaces Task Overview of the Attack and Compliance Graph Generation Process\relax }}{30}{}%
\contentsline {figure}{\numberline {5.2}{\ignorespaces Node Allocation for each Task\relax }}{32}{}%
\contentsline {figure}{\numberline {5.3}{\ignorespaces Data Distribution of Task One\relax }}{34}{}%
\contentsline {figure}{\numberline {5.4}{\ignorespaces Communication From Task 1 to Task 2 when the Number of Nodes Allocated is Equal\relax }}{35}{}%
\contentsline {figure}{\numberline {5.5}{\ignorespaces Communication From Task 1 to Task 2 when Task 1 Has More Nodes Allocated\relax }}{36}{}%
\contentsline {figure}{\numberline {5.6}{\ignorespaces Example of a Not Applicable Exploit for the MPI Tasking Testing\relax }}{40}{}%
\contentsline {figure}{\numberline {5.7}{\ignorespaces Speedup and Efficiency of the MPI Tasking Approach for a Varying Number of Compute Nodes with an Increasing Problem Size\relax }}{40}{}%
\contentsline {figure}{\numberline {5.8}{\ignorespaces Example Graph Using the MPI Subgraphing Approach\relax }}{42}{}%
\contentsline {figure}{\numberline {5.9}{\ignorespaces Frontier Merging and Data Distribution Process\relax }}{44}{}%
\contentsline {figure}{\numberline {5.10}{\ignorespaces First iteration results of MPI Subgraphing in terms of Runtime\relax }}{46}{}%
\contentsline {figure}{\numberline {5.11}{\ignorespaces First iteration results of MPI Subgraphing in terms of Speedup and Efficiency\relax }}{47}{}%
\contentsline {figure}{\numberline {5.12}{\ignorespaces Modified Subgraphing Example Graph with Two New Edges\relax }}{49}{}%
\contentsline {figure}{\numberline {5.13}{\ignorespaces Duplicate States Explored vs Actual Number of States for the 1-4 Service Tests\relax }}{50}{}%
\contentsline {figure}{\numberline {5.14}{\ignorespaces Speedup and Efficiency of MPI Subgraphing when using a DHT\relax }}{52}{}%
\contentsline {figure}{\numberline {5.15}{\ignorespaces Runtime of MPI Subgraphing when using a DHT vs not using a DHT\relax }}{53}{}%
\contentsline {figure}{\numberline {6.1}{\ignorespaces Possible Method for Blending MPI and OpenMP for Task 2 of the MPI Tasking Approach\relax }}{56}{}%
\contentsline {figure}{\numberline {4.1}{\ignorespaces A network without Synchronous Firing generating infeasible states\relax }}{19}{}%
\contentsline {figure}{\numberline {4.2}{\ignorespaces Inclusion of Synchronous Firing into GNU Bison, GNU Flex, and the overall program\relax }}{22}{}%
\contentsline {figure}{\numberline {4.3}{\ignorespaces Synchronous Firing in the Graph Generation Process\relax }}{24}{}%
\contentsline {figure}{\numberline {4.4}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on Runtime\relax }}{27}{}%
\contentsline {figure}{\numberline {4.5}{\ignorespaces Bar Graph and Line Graph Representations of Synchronous Firing on State Space\relax }}{28}{}%
\contentsline {figure}{\numberline {5.1}{\ignorespaces Task Overview of the Attack and Compliance Graph Generation Process\relax }}{31}{}%
\contentsline {figure}{\numberline {5.2}{\ignorespaces Node Allocation for each Task\relax }}{33}{}%
\contentsline {figure}{\numberline {5.3}{\ignorespaces Data Distribution of Task One\relax }}{35}{}%
\contentsline {figure}{\numberline {5.4}{\ignorespaces Communication From Task 1 to Task 2 when the Number of Nodes Allocated is Equal\relax }}{36}{}%
\contentsline {figure}{\numberline {5.5}{\ignorespaces Communication From Task 1 to Task 2 when Task 1 Has More Nodes Allocated\relax }}{37}{}%
\contentsline {figure}{\numberline {5.6}{\ignorespaces Example of a Not Applicable Exploit for the MPI Tasking Testing\relax }}{41}{}%
\contentsline {figure}{\numberline {5.7}{\ignorespaces Speedup and Efficiency of the MPI Tasking Approach for a Varying Number of Compute Nodes with an Increasing Problem Size\relax }}{41}{}%
\contentsline {figure}{\numberline {5.8}{\ignorespaces Example Graph Using the MPI Subgraphing Approach\relax }}{43}{}%
\contentsline {figure}{\numberline {5.9}{\ignorespaces Frontier Merging and Data Distribution Process\relax }}{45}{}%
\contentsline {figure}{\numberline {5.10}{\ignorespaces First iteration results of MPI Subgraphing in terms of Runtime\relax }}{47}{}%
\contentsline {figure}{\numberline {5.11}{\ignorespaces First iteration results of MPI Subgraphing in terms of Speedup and Efficiency\relax }}{48}{}%
\contentsline {figure}{\numberline {5.12}{\ignorespaces Modified Subgraphing Example Graph with Two New Edges\relax }}{50}{}%
\contentsline {figure}{\numberline {5.13}{\ignorespaces Duplicate States Explored vs Actual Number of States for the 1-4 Service Tests\relax }}{51}{}%
\contentsline {figure}{\numberline {5.14}{\ignorespaces Speedup and Efficiency of MPI Subgraphing when using a DHT\relax }}{53}{}%
\contentsline {figure}{\numberline {5.15}{\ignorespaces Runtime of MPI Subgraphing when using a DHT vs not using a DHT\relax }}{54}{}%
\contentsline {figure}{\numberline {6.1}{\ignorespaces Possible Method for Blending MPI and OpenMP for Task 2 of the MPI Tasking Approach\relax }}{57}{}%

View File

@@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.23 (TeX Live 2021/Arch Linux) (preloaded format=pdflatex 2022.3.21) 28 MAR 2022 16:22
This is pdfTeX, Version 3.141592653-2.6-1.40.23 (TeX Live 2021/Arch Linux) (preloaded format=pdflatex 2022.3.21) 3 APR 2022 19:21
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
@@ -242,15 +242,15 @@ Overfull \hbox (1.75291pt too wide) in paragraph at lines 24--24
[6
]
Overfull \hbox (1.5755pt too wide) in paragraph at lines 44--44
Overfull \hbox (1.5755pt too wide) in paragraph at lines 45--45
[] []\OT1/cmr/bx/n/12 PARALLELIZATION US-ING MES-SAGE PASS-ING IN-TER-
[]
)
[7])
\tf@toc=\write3
\openout3 = `Schrick-Noah_MS-Thesis.toc'.
[7] [8] (./Schrick-Noah_MS-Thesis.lot)
[8] (./Schrick-Noah_MS-Thesis.lot)
\tf@lot=\write4
\openout4 = `Schrick-Noah_MS-Thesis.lot'.
@@ -292,6 +292,9 @@ File: ./Chapter3_img/PW.png Graphic file (type png)
<use ./Chapter3_img/PW.png>
Package pdftex.def Info: ./Chapter3_img/PW.png used on input line 17.
(pdftex.def) Requested size: 469.75499pt x 257.74918pt.
LaTeX Warning: Citation `Graphviz' on page 8 undefined on input line 28.
<./Chapter3_img/CC.png, id=67, 658.46pt x 527.9725pt>
File: ./Chapter3_img/CC.png Graphic file (type png)
<use ./Chapter3_img/CC.png>
@@ -302,222 +305,230 @@ Package pdftex.def Info: ./Chapter3_img/CC.png used on input line 32.
] [9 <./Chapter3_img/PW.png>] [10 <./Chapter3_img/CC.png>] [11] [12] [13]
[14] [15]) [16]
] [9 <./Chapter3_img/PW.png>]
LaTeX Warning: Citation `nichols_2018' on page 10 undefined on input line 43.
[10 <./Chapter3_img/CC.png>] [11] [12]
LaTeX Warning: Citation `nichols_2018' on page 13 undefined on input line 71.
[13] [14] [15] [16]) [17]
\openout2 = `Chapter4.aux'.
(./Chapter4.tex
CHAPTER 4.
<./Chapter4_img/non-sync_ex.drawio.png, id=97, 1014.79124pt x 400.49625pt>
<./Chapter4_img/non-sync_ex.drawio.png, id=100, 1014.79124pt x 400.49625pt>
File: ./Chapter4_img/non-sync_ex.drawio.png Graphic file (type png)
<use ./Chapter4_img/non-sync_ex.drawio.png>
Package pdftex.def Info: ./Chapter4_img/non-sync_ex.drawio.png used on input l
ine 17.
(pdftex.def) Requested size: 469.75499pt x 185.3916pt.
[17
[18
] [18 <./Chapter4_img/non-sync_ex.drawio.png>] [19]
] [19 <./Chapter4_img/non-sync_ex.drawio.png>] [20]
Overfull \hbox (30.42026pt too wide) in paragraph at lines 49--49
[] \OT1/cmtt/m/n/12 <exploit> ::= <group name> "group" "exploit" <identifier>
, (<parameter-list>)=
[]
<./Chapter4_img/Bison-Flex.png, id=109, 1447.4075pt x 492.84125pt>
File: ./Chapter4_img/Bison-Flex.png Graphic file (type png)
<use ./Chapter4_img/Bison-Flex.png>
Package pdftex.def Info: ./Chapter4_img/Bison-Flex.png used on input line 61.
(pdftex.def) Requested size: 469.75499pt x 159.95341pt.
[20] [21 <./Chapter4_img/Bison-Flex.png (PNG copy)>]
<./Chapter4_img/Sync-Fire.png, id=118, 489.83pt x 1052.93375pt>
<./Chapter4_img/Bison-Flex-v2.png, id=113, 1029.8475pt x 330.23375pt>
File: ./Chapter4_img/Bison-Flex-v2.png Graphic file (type png)
<use ./Chapter4_img/Bison-Flex-v2.png>
Package pdftex.def Info: ./Chapter4_img/Bison-Flex-v2.png used on input line 6
1.
(pdftex.def) Requested size: 469.75499pt x 150.62946pt.
[21] [22 <./Chapter4_img/Bison-Flex-v2.png>]
<./Chapter4_img/Sync-Fire.png, id=122, 489.83pt x 1052.93375pt>
File: ./Chapter4_img/Sync-Fire.png Graphic file (type png)
<use ./Chapter4_img/Sync-Fire.png>
Package pdftex.def Info: ./Chapter4_img/Sync-Fire.png used on input line 85.
(pdftex.def) Requested size: 244.9144pt x 526.46559pt.
[22] [23 <./Chapter4_img/Sync-Fire.png>] [24]
<./Chapter4_img/Sync-Runtime-Bar.png, id=129, 435.591pt x 238.491pt>
[23] [24 <./Chapter4_img/Sync-Fire.png>] [25]
<./Chapter4_img/Sync-Runtime-Bar.png, id=133, 435.591pt x 238.491pt>
File: ./Chapter4_img/Sync-Runtime-Bar.png Graphic file (type png)
<use ./Chapter4_img/Sync-Runtime-Bar.png>
Package pdftex.def Info: ./Chapter4_img/Sync-Runtime-Bar.png used on input lin
e 123.
(pdftex.def) Requested size: 469.75499pt x 257.2098pt.
<./Chapter4_img/Sync-Runtime.png, id=130, 402.522pt x 236.082pt>
<./Chapter4_img/Sync-Runtime.png, id=134, 402.522pt x 236.082pt>
File: ./Chapter4_img/Sync-Runtime.png Graphic file (type png)
<use ./Chapter4_img/Sync-Runtime.png>
Package pdftex.def Info: ./Chapter4_img/Sync-Runtime.png used on input line 12
4.
(pdftex.def) Requested size: 469.75499pt x 275.52676pt.
<./Chapter4_img/Sync-StateSpace-Bar.png, id=131, 434.058pt x 230.169pt>
<./Chapter4_img/Sync-StateSpace-Bar.png, id=135, 434.058pt x 230.169pt>
File: ./Chapter4_img/Sync-StateSpace-Bar.png Graphic file (type png)
<use ./Chapter4_img/Sync-StateSpace-Bar.png>
Package pdftex.def Info: ./Chapter4_img/Sync-StateSpace-Bar.png used on input
line 131.
(pdftex.def) Requested size: 469.75499pt x 249.11264pt.
<./Chapter4_img/Sync-StateSpace.png, id=132, 402.741pt x 236.301pt>
<./Chapter4_img/Sync-StateSpace.png, id=136, 402.741pt x 236.301pt>
File: ./Chapter4_img/Sync-StateSpace.png Graphic file (type png)
<use ./Chapter4_img/Sync-StateSpace.png>
Package pdftex.def Info: ./Chapter4_img/Sync-StateSpace.png used on input line
132.
(pdftex.def) Requested size: 469.75499pt x 275.63454pt.
) [25] [26 <./Chapter4_img/Sync-Runtime-Bar.png> <./Chapter4_img/Sync-Runtime.p
ng>] [27 <./Chapter4_img/Sync-StateSpace-Bar.png> <./Chapter4_img/Sync-StateSpa
) [26] [27 <./Chapter4_img/Sync-Runtime-Bar.png> <./Chapter4_img/Sync-Runtime.p
ng>] [28 <./Chapter4_img/Sync-StateSpace-Bar.png> <./Chapter4_img/Sync-StateSpa
ce.png>]
\openout2 = `Chapter5.aux'.
(./Chapter5.tex
CHAPTER 5.
[28
[29
]
<./Chapter5_img/horiz_task.drawio.png, id=150, 1181.41376pt x 785.93625pt>
<./Chapter5_img/horiz_task.drawio.png, id=154, 1181.41376pt x 785.93625pt>
File: ./Chapter5_img/horiz_task.drawio.png Graphic file (type png)
<use ./Chapter5_img/horiz_task.drawio.png>
Package pdftex.def Info: ./Chapter5_img/horiz_task.drawio.png used on input li
ne 42.
(pdftex.def) Requested size: 469.75499pt x 312.49811pt.
[29] [30 <./Chapter5_img/horiz_task.drawio.png>]
<./Chapter5_img/node-alloc.png, id=158, 818.30719pt x 536.75531pt>
[30] [31 <./Chapter5_img/horiz_task.drawio.png>]
<./Chapter5_img/node-alloc.png, id=162, 818.30719pt x 536.75531pt>
File: ./Chapter5_img/node-alloc.png Graphic file (type png)
<use ./Chapter5_img/node-alloc.png>
Package pdftex.def Info: ./Chapter5_img/node-alloc.png used on input line 54.
(pdftex.def) Requested size: 469.75499pt x 308.1323pt.
[31] [32 <./Chapter5_img/node-alloc.PNG>]
<./Chapter5_img/Task1-Data-Dist.png, id=167, 1017.04968pt x 336.50719pt>
[32] [33 <./Chapter5_img/node-alloc.PNG>]
<./Chapter5_img/Task1-Data-Dist.png, id=171, 1017.04968pt x 336.50719pt>
File: ./Chapter5_img/Task1-Data-Dist.png Graphic file (type png)
<use ./Chapter5_img/Task1-Data-Dist.png>
Package pdftex.def Info: ./Chapter5_img/Task1-Data-Dist.png used on input line
70.
(pdftex.def) Requested size: 469.75499pt x 155.42674pt.
[33]
<./Chapter5_img/Task1-Case1.png, id=171, 586.44093pt x 339.51843pt>
[34]
<./Chapter5_img/Task1-Case1.png, id=175, 586.44093pt x 339.51843pt>
File: ./Chapter5_img/Task1-Case1.png Graphic file (type png)
<use ./Chapter5_img/Task1-Case1.png>
Package pdftex.def Info: ./Chapter5_img/Task1-Case1.png used on input line 79.
(pdftex.def) Requested size: 469.75499pt x 271.9622pt.
<./Chapter5_img/Task1-Case2.png, id=172, 702.37407pt x 414.79968pt>
<./Chapter5_img/Task1-Case2.png, id=176, 702.37407pt x 414.79968pt>
File: ./Chapter5_img/Task1-Case2.png Graphic file (type png)
<use ./Chapter5_img/Task1-Case2.png>
Package pdftex.def Info: ./Chapter5_img/Task1-Case2.png used on input line 86.
(pdftex.def) Requested size: 469.75499pt x 277.43332pt.
[34 <./Chapter5_img/Task1-Data-Dist.PNG>] [35 <./Chapter5_img/Task1-Case1.PNG>
] [36 <./Chapter5_img/Task1-Case2.PNG>]
[35 <./Chapter5_img/Task1-Data-Dist.PNG>] [36 <./Chapter5_img/Task1-Case1.PNG>
] [37 <./Chapter5_img/Task1-Case2.PNG>]
LaTeX Warning: No positions in optional float specifier.
Default added (so using `tbp') on input line 104.
[37] [38]
<./Chapter5_img/NA.png, id=192, 369.38pt x 118.4425pt>
[38] [39]
<./Chapter5_img/NA.png, id=197, 369.38pt x 118.4425pt>
File: ./Chapter5_img/NA.png Graphic file (type png)
<use ./Chapter5_img/NA.png>
Package pdftex.def Info: ./Chapter5_img/NA.png used on input line 143.
(pdftex.def) Requested size: 184.68954pt x 59.2211pt.
<./Chapter5_img/Speedup-Esize-Tasking.png, id=193, 620.208pt x 321.93pt>
<./Chapter5_img/Speedup-Esize-Tasking.png, id=198, 620.208pt x 321.93pt>
File: ./Chapter5_img/Speedup-Esize-Tasking.png Graphic file (type png)
<use ./Chapter5_img/Speedup-Esize-Tasking.png>
Package pdftex.def Info: ./Chapter5_img/Speedup-Esize-Tasking.png used on inpu
t line 151.
(pdftex.def) Requested size: 469.75499pt x 243.83916pt.
<./Chapter5_img/Eff-Esize-Tasking.png, id=194, 620.208pt x 322.149pt>
<./Chapter5_img/Eff-Esize-Tasking.png, id=199, 620.208pt x 322.149pt>
File: ./Chapter5_img/Eff-Esize-Tasking.png Graphic file (type png)
<use ./Chapter5_img/Eff-Esize-Tasking.png>
Package pdftex.def Info: ./Chapter5_img/Eff-Esize-Tasking.png used on input li
ne 152.
(pdftex.def) Requested size: 469.75499pt x 244.00504pt.
[39] [40 <./Chapter5_img/NA.png> <./Chapter5_img/Speedup-Esize-Tasking.png> <.
[40] [41 <./Chapter5_img/NA.png> <./Chapter5_img/Speedup-Esize-Tasking.png> <.
/Chapter5_img/Eff-Esize-Tasking.png>]
<./Chapter5_img/subgraphing.drawio.png, id=204, 824.07875pt x 743.77875pt>
<./Chapter5_img/subgraphing.drawio.png, id=208, 824.07875pt x 743.77875pt>
File: ./Chapter5_img/subgraphing.drawio.png Graphic file (type png)
<use ./Chapter5_img/subgraphing.drawio.png>
Package pdftex.def Info: ./Chapter5_img/subgraphing.drawio.png used on input l
ine 166.
(pdftex.def) Requested size: 247.22552pt x 223.13535pt.
[41] [42 <./Chapter5_img/subgraphing.drawio.png>]
<./Chapter5_img/front_merge.drawio.png, id=212, 1059.96pt x 465.74pt>
[42] [43 <./Chapter5_img/subgraphing.drawio.png>]
<./Chapter5_img/front_merge.drawio.png, id=216, 1059.96pt x 465.74pt>
File: ./Chapter5_img/front_merge.drawio.png Graphic file (type png)
<use ./Chapter5_img/front_merge.drawio.png>
Package pdftex.def Info: ./Chapter5_img/front_merge.drawio.png used on input l
ine 179.
(pdftex.def) Requested size: 469.75499pt x 206.4044pt.
[43]
[44]
LaTeX Warning: No positions in optional float specifier.
Default added (so using `tbp') on input line 193.
[44 <./Chapter5_img/front_merge.drawio.png>]
<./Chapter5_img/MPISubg_RT_No_DHT.png, id=221, 399.018pt x 250.098pt>
[45 <./Chapter5_img/front_merge.drawio.png>]
<./Chapter5_img/MPISubg_RT_No_DHT.png, id=225, 399.018pt x 250.098pt>
File: ./Chapter5_img/MPISubg_RT_No_DHT.png Graphic file (type png)
<use ./Chapter5_img/MPISubg_RT_No_DHT.png>
Package pdftex.def Info: ./Chapter5_img/MPISubg_RT_No_DHT.png used on input li
ne 221.
(pdftex.def) Requested size: 469.75499pt x 294.44524pt.
<./Chapter5_img/no_DHT_Spd.png, id=222, 424.422pt x 249.003pt>
<./Chapter5_img/no_DHT_Spd.png, id=226, 424.422pt x 249.003pt>
File: ./Chapter5_img/no_DHT_Spd.png Graphic file (type png)
<use ./Chapter5_img/no_DHT_Spd.png>
Package pdftex.def Info: ./Chapter5_img/no_DHT_Spd.png used on input line 229.
(pdftex.def) Requested size: 469.75499pt x 275.60631pt.
<./Chapter5_img/no_DHT_eff.png, id=223, 429.021pt x 249.222pt>
<./Chapter5_img/no_DHT_eff.png, id=227, 429.021pt x 249.222pt>
File: ./Chapter5_img/no_DHT_eff.png Graphic file (type png)
<use ./Chapter5_img/no_DHT_eff.png>
Package pdftex.def Info: ./Chapter5_img/no_DHT_eff.png used on input line 230.
(pdftex.def) Requested size: 469.75499pt x 272.8939pt.
[45] [46 <./Chapter5_img/MPISubg_RT_No_DHT.png>] [47 <./Chapter5_img/no_DHT_Sp
[46] [47 <./Chapter5_img/MPISubg_RT_No_DHT.png>] [48 <./Chapter5_img/no_DHT_Sp
d.png> <./Chapter5_img/no_DHT_eff.png>]
<./Chapter5_img/dup.drawio.png, id=236, 824.07875pt x 743.77875pt>
<./Chapter5_img/dup.drawio.png, id=240, 824.07875pt x 743.77875pt>
File: ./Chapter5_img/dup.drawio.png Graphic file (type png)
<use ./Chapter5_img/dup.drawio.png>
Package pdftex.def Info: ./Chapter5_img/dup.drawio.png used on input line 238.
(pdftex.def) Requested size: 469.75499pt x 423.98099pt.
<./Chapter5_img/Dup_DHT.png, id=237, 796.065pt x 483.99pt>
<./Chapter5_img/Dup_DHT.png, id=241, 796.065pt x 483.99pt>
File: ./Chapter5_img/Dup_DHT.png Graphic file (type png)
<use ./Chapter5_img/Dup_DHT.png>
Package pdftex.def Info: ./Chapter5_img/Dup_DHT.png used on input line 245.
(pdftex.def) Requested size: 469.75499pt x 285.59593pt.
[48] [49 <./Chapter5_img/dup.drawio.png>] [50 <./Chapter5_img/Dup_DHT.png>]
<./Chapter5_img/DHT_Spd.png, id=250, 421.575pt x 233.235pt>
[49] [50 <./Chapter5_img/dup.drawio.png>] [51 <./Chapter5_img/Dup_DHT.png>]
<./Chapter5_img/DHT_Spd.png, id=254, 421.575pt x 233.235pt>
File: ./Chapter5_img/DHT_Spd.png Graphic file (type png)
<use ./Chapter5_img/DHT_Spd.png>
Package pdftex.def Info: ./Chapter5_img/DHT_Spd.png used on input line 255.
(pdftex.def) Requested size: 469.75499pt x 259.89395pt.
<./Chapter5_img/DHT_Eff.png, id=251, 422.889pt x 233.235pt>
<./Chapter5_img/DHT_Eff.png, id=255, 422.889pt x 233.235pt>
File: ./Chapter5_img/DHT_Eff.png Graphic file (type png)
<use ./Chapter5_img/DHT_Eff.png>
Package pdftex.def Info: ./Chapter5_img/DHT_Eff.png used on input line 256.
(pdftex.def) Requested size: 469.75499pt x 259.08965pt.
<./Chapter5_img/DHT_noDHT.png, id=252, 806.577pt x 496.692pt>
<./Chapter5_img/DHT_noDHT.png, id=256, 806.577pt x 496.692pt>
File: ./Chapter5_img/DHT_noDHT.png Graphic file (type png)
<use ./Chapter5_img/DHT_noDHT.png>
Package pdftex.def Info: ./Chapter5_img/DHT_noDHT.png used on input line 262.
(pdftex.def) Requested size: 469.75499pt x 289.27902pt.
) [51] [52 <./Chapter5_img/DHT_Spd.png> <./Chapter5_img/DHT_Eff.png>] [53 <./Ch
) [52] [53 <./Chapter5_img/DHT_Spd.png> <./Chapter5_img/DHT_Eff.png>] [54 <./Ch
apter5_img/DHT_noDHT.png>]
\openout2 = `Chapter6.aux'.
(./Chapter6.tex
CHAPTER 6.
[54
[55
]
<./Chapter5_img/MPI-OpenMP-Blend.PNG, id=268, 1008.01593pt x 606.01407pt>
<./Chapter5_img/MPI-OpenMP-Blend.PNG, id=272, 1008.01593pt x 606.01407pt>
File: ./Chapter5_img/MPI-OpenMP-Blend.PNG Graphic file (type png)
<use ./Chapter5_img/MPI-OpenMP-Blend.PNG>
Package pdftex.def Info: ./Chapter5_img/MPI-OpenMP-Blend.PNG used on input lin
e 17.
(pdftex.def) Requested size: 469.75499pt x 282.41318pt.
[55] [56 <./Chapter5_img/MPI-OpenMP-Blend.PNG>]) [57]
(./Schrick-Noah_MS-Thesis.bbl [58
[56] [57 <./Chapter5_img/MPI-OpenMP-Blend.PNG>]) [58]
(./Schrick-Noah_MS-Thesis.bbl [59
@ -545,14 +556,18 @@ Underfull \hbox (badness 2119) in paragraph at lines 51--54
ntent/pkg/PLAW-
[]
[59] [60]
[60] [61]
Underfull \hbox (badness 1383) in paragraph at lines 164--167
[]\OT1/cmr/m/n/12 Lawrence Liv-er-more Na-tional Lab-o-ra-tory, ``mpiP, a light
-weight MPI pro-filer.''
[]
) [61] (./Schrick-Noah_MS-Thesis.aux (./Chapter1.aux) (./Chapter2.aux)
(./Chapter3.aux) (./Chapter4.aux) (./Chapter5.aux) (./Chapter6.aux)) )
) [62] (./Schrick-Noah_MS-Thesis.aux (./Chapter1.aux) (./Chapter2.aux)
(./Chapter3.aux) (./Chapter4.aux) (./Chapter5.aux) (./Chapter6.aux))
LaTeX Warning: There were undefined references.
)
(\end occurred inside a group at level 6)
### semi simple group (level 6) entered at line 198 (\begingroup)
@ -564,7 +579,7 @@ Underfull \hbox (badness 1383) in paragraph at lines 164--167
### bottom level
Here is how much of TeX's memory you used:
4441 strings out of 478276
82864 string characters out of 5853013
82888 string characters out of 5853013
374018 words of memory out of 5000000
22551 multiletter control sequences out of 15000+600000
473155 words of font info for 41 fonts, out of 8000000 for 9000
@ -578,10 +593,10 @@ ts/type1/public/amsfonts/cm/cmr12.pfb></usr/share/texmf-dist/fonts/type1/public
y10.pfb></usr/share/texmf-dist/fonts/type1/public/amsfonts/cm/cmti12.pfb></usr/
share/texmf-dist/fonts/type1/public/amsfonts/cm/cmtt12.pfb></usr/share/texmf-di
st/fonts/type1/public/cm-super/sfrm1200.pfb>
Output written on Schrick-Noah_MS-Thesis.pdf (72 pages, 2129906 bytes).
Output written on Schrick-Noah_MS-Thesis.pdf (73 pages, 2127074 bytes).
PDF statistics:
330 PDF objects out of 1000 (max. 8388607)
186 compressed objects within 2 object streams
336 PDF objects out of 1000 (max. 8388607)
190 compressed objects within 2 object streams
0 named destinations out of 1000 (max. 500000)
141 words of extra memory for PDF output out of 10000 (max. 10000000)

View File

@ -1,3 +1,3 @@
{\vspace {\baselineskip }}
\contentsline {table}{\numberline {5.1}{\ignorespaces MPI Tags for the MPI Tasking Approach\relax }}{37}{}%
\contentsline {table}{\numberline {5.2}{\ignorespaces MPI Tags for the MPI Subgraphing Approach\relax }}{45}{}%
\contentsline {table}{\numberline {5.1}{\ignorespaces MPI Tags for the MPI Tasking Approach\relax }}{38}{}%
\contentsline {table}{\numberline {5.2}{\ignorespaces MPI Tags for the MPI Subgraphing Approach\relax }}{46}{}%

Binary file not shown.

View File

@ -24,53 +24,54 @@
\contentsline {chapter}{\numberline {CHAPTER 3: }{\bf \uppercase {UTILITY EXTENSIONS TO THE RAGE ATTACK GRAPH GENERATOR}}}{8}{}%
\contentsline {section}{\numberline {3.1}\bf Path Walking}{8}{}%
\contentsline {section}{\numberline {3.2}\bf Color Coding}{9}{}%
\contentsline {section}{\numberline {3.3}\bf Compound Operators}{11}{}%
\contentsline {section}{\numberline {3.3}\bf Compound Operators}{10}{}%
\contentsline {section}{\numberline {3.4}\bf Relational Operators}{12}{}%
\contentsline {section}{\numberline {3.5}\bf Intermediate Database Storage}{13}{}%
\contentsline {subsection}{\numberline {3.5.1}\it Memory Constraint Difficulties}{13}{}%
\contentsline {subsection}{\numberline {3.5.2}\it Maximizing Performance with Intermediate Database Storage}{14}{}%
\contentsline {subsection}{\numberline {3.5.3}\it Portability}{16}{}%
\contentsline {chapter}{\numberline {CHAPTER 4: }{\bf \uppercase {SYNCHRONOUS FIRING}}}{17}{}%
\contentsline {section}{\numberline {4.1}\bf Introduction}{17}{}%
\contentsline {subsection}{\numberline {4.1.1}\it Related Synchronous Firing Work}{18}{}%
\contentsline {section}{\numberline {4.2}\bf Necessary Alterations and Additions}{19}{}%
\contentsline {subsection}{\numberline {4.2.1}\it GNU Bison and Flex}{19}{}%
\contentsline {subsection}{\numberline {4.2.2}\it PostgreSQL}{20}{}%
\contentsline {subsection}{\numberline {4.2.3}\it Compound Operators}{21}{}%
\contentsline {subsection}{\numberline {4.2.4}\it Graph Generation}{21}{}%
\contentsline {section}{\numberline {4.3}\bf Experimental Networks and Results}{22}{}%
\contentsline {subsection}{\numberline {4.3.1}\it Experimental Networks}{22}{}%
\contentsline {subsection}{\numberline {4.3.2}\it Results}{24}{}%
\contentsline {chapter}{\numberline {CHAPTER 5: }{\bf \uppercase {Parallelization Using MESSAGE PASSING INTERFACE}}}{28}{}%
\contentsline {section}{\numberline {5.1}\bf Introduction to MPI Utilization for Attack and Compliance Graph Generation}{28}{}%
\contentsline {section}{\numberline {5.2}\bf Necessary Components}{28}{}%
\contentsline {subsection}{\numberline {5.2.1}\it Serialization}{28}{}%
\contentsline {section}{\numberline {5.3}\bf Tasking Approach}{29}{}%
\contentsline {subsection}{\numberline {5.3.1}\it Introduction to the Tasking Approach}{29}{}%
\contentsline {subsection}{\numberline {5.3.2}\it Algorithm Design}{31}{}%
\contentsline {subsubsection}{\numberline {5.3.2.1}Communication Structure}{31}{}%
\contentsline {subsubsection}{\numberline {5.3.2.2}Task 0}{33}{}%
\contentsline {subsubsection}{\numberline {5.3.2.3}Task 1}{33}{}%
\contentsline {subsubsection}{\numberline {5.3.2.4}Task 2}{34}{}%
\contentsline {subsubsection}{\numberline {5.3.2.5}Task 3}{34}{}%
\contentsline {subsubsection}{\numberline {5.3.2.6}Task 4 and Task 5}{36}{}%
\contentsline {subsubsection}{\numberline {5.3.2.7}MPI Tags}{37}{}%
\contentsline {subsection}{\numberline {5.3.3}\it Performance Expectations and Use Cases}{37}{}%
\contentsline {subsection}{\numberline {5.3.4}\it Results}{38}{}%
\contentsline {section}{\numberline {5.4}\bf Subgraphing Approach}{39}{}%
\contentsline {subsection}{\numberline {5.4.1}\it Introduction to the Subgraphing Approach}{41}{}%
\contentsline {subsection}{\numberline {5.4.2}\it Algorithm Design}{41}{}%
\contentsline {subsubsection}{\numberline {5.4.2.1}Worker Nodes}{42}{}%
\contentsline {subsubsection}{\numberline {5.4.2.2}Root Node}{43}{}%
\contentsline {subsubsection}{\numberline {5.4.2.3}Database Node}{44}{}%
\contentsline {subsubsection}{\numberline {5.4.2.4}MPI Tags}{44}{}%
\contentsline {subsection}{\numberline {5.4.3}\it Performance Expectations and Use Cases}{44}{}%
\contentsline {subsection}{\numberline {5.4.4}\it Results}{45}{}%
\contentsline {chapter}{\numberline {CHAPTER 6: }{\bf \uppercase {CONCLUSIONS AND FUTURE WORKS}}}{54}{}%
\contentsline {section}{\numberline {6.1}\bf Conclusions}{54}{}%
\contentsline {section}{\numberline {6.2}\bf Future Work}{55}{}%
\contentsline {subsection}{\numberline {3.5.1}\it Introduction to Intermediate Database Storage}{13}{}%
\contentsline {subsection}{\numberline {3.5.2}\it Memory Constraint Difficulties}{13}{}%
\contentsline {subsection}{\numberline {3.5.3}\it Maximizing Performance with Intermediate Database Storage}{15}{}%
\contentsline {subsection}{\numberline {3.5.4}\it Portability}{16}{}%
\contentsline {chapter}{\numberline {CHAPTER 4: }{\bf \uppercase {SYNCHRONOUS FIRING}}}{18}{}%
\contentsline {section}{\numberline {4.1}\bf Introduction}{18}{}%
\contentsline {subsection}{\numberline {4.1.1}\it Related Synchronous Firing Work}{19}{}%
\contentsline {section}{\numberline {4.2}\bf Necessary Alterations and Additions}{20}{}%
\contentsline {subsection}{\numberline {4.2.1}\it GNU Bison and Flex}{20}{}%
\contentsline {subsection}{\numberline {4.2.2}\it PostgreSQL}{21}{}%
\contentsline {subsection}{\numberline {4.2.3}\it Compound Operators}{22}{}%
\contentsline {subsection}{\numberline {4.2.4}\it Graph Generation}{22}{}%
\contentsline {section}{\numberline {4.3}\bf Experimental Networks and Results}{23}{}%
\contentsline {subsection}{\numberline {4.3.1}\it Experimental Networks}{23}{}%
\contentsline {subsection}{\numberline {4.3.2}\it Results}{25}{}%
\contentsline {chapter}{\numberline {CHAPTER 5: }{\bf \uppercase {Parallelization Using MESSAGE PASSING INTERFACE}}}{29}{}%
\contentsline {section}{\numberline {5.1}\bf Introduction to MPI Utilization for Attack and Compliance Graph Generation}{29}{}%
\contentsline {section}{\numberline {5.2}\bf Necessary Components}{29}{}%
\contentsline {subsection}{\numberline {5.2.1}\it Serialization}{29}{}%
\contentsline {section}{\numberline {5.3}\bf Tasking Approach}{30}{}%
\contentsline {subsection}{\numberline {5.3.1}\it Introduction to the Tasking Approach}{30}{}%
\contentsline {subsection}{\numberline {5.3.2}\it Algorithm Design}{32}{}%
\contentsline {subsubsection}{\numberline {5.3.2.1}Communication Structure}{32}{}%
\contentsline {subsubsection}{\numberline {5.3.2.2}Task 0}{34}{}%
\contentsline {subsubsection}{\numberline {5.3.2.3}Task 1}{34}{}%
\contentsline {subsubsection}{\numberline {5.3.2.4}Task 2}{35}{}%
\contentsline {subsubsection}{\numberline {5.3.2.5}Task 3}{35}{}%
\contentsline {subsubsection}{\numberline {5.3.2.6}Task 4 and Task 5}{37}{}%
\contentsline {subsubsection}{\numberline {5.3.2.7}MPI Tags}{38}{}%
\contentsline {subsection}{\numberline {5.3.3}\it Performance Expectations and Use Cases}{38}{}%
\contentsline {subsection}{\numberline {5.3.4}\it Results}{39}{}%
\contentsline {section}{\numberline {5.4}\bf Subgraphing Approach}{40}{}%
\contentsline {subsection}{\numberline {5.4.1}\it Introduction to the Subgraphing Approach}{42}{}%
\contentsline {subsection}{\numberline {5.4.2}\it Algorithm Design}{42}{}%
\contentsline {subsubsection}{\numberline {5.4.2.1}Worker Nodes}{43}{}%
\contentsline {subsubsection}{\numberline {5.4.2.2}Root Node}{44}{}%
\contentsline {subsubsection}{\numberline {5.4.2.3}Database Node}{45}{}%
\contentsline {subsubsection}{\numberline {5.4.2.4}MPI Tags}{45}{}%
\contentsline {subsection}{\numberline {5.4.3}\it Performance Expectations and Use Cases}{45}{}%
\contentsline {subsection}{\numberline {5.4.4}\it Results}{46}{}%
\contentsline {chapter}{\numberline {CHAPTER 6: }{\bf \uppercase {CONCLUSIONS AND FUTURE WORKS}}}{55}{}%
\contentsline {section}{\numberline {6.1}\bf Conclusions}{55}{}%
\contentsline {section}{\numberline {6.2}\bf Future Work}{56}{}%
{\hfill \ }
\contentsline {section}{\hspace {-\parindent }NOMENCLATURE}{58}{}%
\contentsline {section}{\hspace {-\parindent }NOMENCLATURE}{59}{}%
\addvspace {10pt}
\contentsline {section}{\hspace {-\parindent }BIBLIOGRAPHY}{58}{}%
\contentsline {section}{\hspace {-\parindent }BIBLIOGRAPHY}{59}{}%
{\hfill \ }