@comment{Reformatted: one field per line; Last, First author form; `--` page
ranges; bare month macros; bare DOIs (no resolver prefix, no duplicate url);
conference papers moved from @article to @inproceedings. Citation keys are
unchanged so existing \cite commands still resolve.}

@misc{j_hale_compliance_nodate,
  title  = {Compliance {Method} for a {Cyber}-{Physical} {System}},
  author = {Hale, John and Hawrylak, Peter and Papa, M.},
  note   = {U.S. Patent Number 9,471,789, Oct. 18, 2016.},
  number = {9471789},
  file   = {Complaince_Graph_US_Patent_9471789:/home/noah/Zotero/storage/55BZN4U7/Complaince_Graph_US_Patent_9471789.pdf:application/pdf},
}

@inproceedings{li_combining_2019,
  title     = {Combining {OpenCL} and {MPI} to Support Heterogeneous Computing on a Cluster},
  booktitle = {ACM International Conference Proceeding Series},
  isbn      = {9781450372275},
  doi       = {10.1145/3332186.3333059},
  author    = {Li, Ming and Hawrylak, Peter and Hale, John},
  year      = {2019},
  keywords  = {Heterogeneous computing, HPC, MDP, MPI, OpenCL, Parallelism},
  abstract  = {This paper presents an implementation of a heterogeneous programming model which combines Open Computing Language (OpenCL) and Message Passing Interface (MPI). The model is applied to solving a Markov decision process (MDP) with value iteration method. The performance test is conducted on a high performance computing cluster. At peak performance, the model is able to achieve a 57X speedup over a serial implementation. For an extremely large input MDP, which has 1,000,000 states, the obtained speedup is still over 12X, showing that this heterogeneous programming model can solve MDPs more efficiently than the serial solver does.},
  file      = {Combining OpenCL and MPI to Support Heterogeneous Computing on a Cluster:/home/noah/Zotero/storage/TXHCQ5S8/Combining OpenCL and MPI to Support Heterogeneous Computing on a Cluster.pdf:application/pdf},
}

@misc{zeng_cyber_2017,
  title         = {Cyber {Attack} {Analysis} {Based} on {Markov} {Process} {Model}},
  author        = {Zeng, Keming},
  year          = {2017},
  internal-note = {attached file suggests this is a thesis -- confirm degree and school, then retype as @mastersthesis/@phdthesis},
  file          = {keming_thesis:/home/noah/Zotero/storage/LQY2YWSR/keming_thesis.pdf:application/pdf},
}

@inproceedings{baloyi_guidelines_2019,
  title     = {Guidelines for {Data} {Privacy} {Compliance}: {A} {Focus} on {Cyberphysical} {Systems} and {Internet} of {Things}},
  booktitle = {{SAICSIT} '19: {Proceedings} of the {South} {African} {Institute} of {Computer} {Scientists} and {Information} {Technologists} 2019},
  address   = {Skukuza, South Africa},
  publisher = {Association for Computing Machinery},
  doi       = {10.1145/3351108.3351143},
  author    = {Baloyi, Ntsako and Kotzé, Paula},
  year      = {2019},
}

@article{allman_complying_2006,
  title   = {Complying with {Compliance}: {Blowing} it off is not an option},
  journal = {ACM Queue},
  volume  = {4},
  number  = {7},
  author  = {Allman, Eric},
  year    = {2006},
}

@misc{noauthor_health_1996,
  title = {Health {Insurance} {Portability} and {Accountability} {Act} of 1996},
  note  = {Pub. L. No. 104-191},
  year  = {1996},
  url   = {https://www.govinfo.gov/content/pkg/PLAW-104publ191/html/PLAW-104publ191.htm},
}

@misc{PCI,
  title  = {Payment {Card} {Industry} ({PCI}) {Data} {Security} {Standard}},
  author = {{PCI Security Standards Council}},
  month  = may,
  year   = {2018},
  url    = {https://www.pcisecuritystandards.org/documents/PCI_DSS_v3-2-1.pdf},
}

@article{centrality_causal,
  title   = {Node centrality measures are a poor substitute for causal inference},
  journal = {Scientific Reports},
  volume  = {9},
  pages   = {6846},
  issn    = {2045-2322},
  doi     = {10.1038/s41598-019-43033-9},
  author  = {Dablander, Fabian and Hinne, Max},
  year    = {2019},
}

@misc{Mieghem2018DirectedGA,
  title         = {Directed Graphs and Mysterious Complex Eigenvalues},
  author        = {Van Mieghem, Piet},
  year          = {2018},
  internal-note = {was @inproceedings with no booktitle; venue unknown -- confirm and restore the proceedings title},
}

@article{Guo2017HermitianAM,
  title   = {Hermitian Adjacency Matrix of Digraphs and Mixed Graphs},
  author  = {Guo, Krystal and Mohar, Bojan},
  journal = {Journal of Graph Theory},
  year    = {2017},
  volume  = {85},
}

@article{Brualdi2010SpectraOD,
  title   = {Spectra of digraphs},
  author  = {Brualdi, Richard A.},
  journal = {Linear Algebra and its Applications},
  year    = {2010},
  volume  = {432},
  pages   = {2181--2213},
}

@article{PMID:30064421,
  title    = {A systematic survey of centrality measures for protein-protein interaction networks},
  author   = {Ashtiani, Minoo and Salehzadeh-Yazdi, Ali and Razaghi-Moghadam, Zahra and Hennig, Holger and Wolkenhauer, Olaf and Mirzaie, Mehdi and Jafari, Mohieddin},
  journal  = {BMC Systems Biology},
  doi      = {10.1186/s12918-018-0598-2},
  volume   = {12},
  number   = {1},
  pages    = {80},
  month    = jul,
  year     = {2018},
  issn     = {1752-0509},
  abstract = {Background: Numerous centrality measures have been introduced to identify "central" nodes in large networks. The availability of a wide range of measures for ranking influential nodes leaves the user to decide which measure may best suit the analysis of a given network. The choice of a suitable measure is furthermore complicated by the impact of the network topology on ranking influential nodes by centrality measures. To approach this problem systematically, we examined the centrality profile of nodes of yeast protein-protein interaction networks (PPINs) in order to detect which centrality measure is succeeding in predicting influential proteins. We studied how different topological network features are reflected in a large set of commonly used centrality measures. Results: We used yeast PPINs to compare 27 common of centrality measures. The measures characterize and assort influential nodes of the networks. We applied principal component analysis (PCA) and hierarchical clustering and found that the most informative measures depend on the network's topology. Interestingly, some measures had a high level of contribution in comparison to others in all PPINs, namely Latora closeness, Decay, Lin, Freeman closeness, Diffusion, Residual closeness and Average distance centralities. Conclusions: The choice of a suitable set of centrality measures is crucial for inferring important functional properties of a network. We concluded that undertaking data reduction using unsupervised machine learning methods helps to choose appropriate variables (centrality measures). Hence, we proposed identifying the contribution proportions of the centrality measures with PCA as a prerequisite step of network analysis before inferring functional consequences, e.g., essentiality of a node.},
  url      = {https://europepmc.org/articles/PMC6069823},
}

@article{Katz,
  author  = {Katz, Leo},
  title   = {A new status index derived from sociometric analysis},
  journal = {Psychometrika},
  year    = {1953},
  volume  = {18},
  number  = {1},
  pages   = {39--43},
  month   = mar,
  doi     = {10.1007/BF02289026},
  url     = {https://ideas.repec.org/a/spr/psycho/v18y1953i1p39-43.html},
}

@inproceedings{ModKatz,
  title     = {Katz centrality of {Markovian} temporal networks: Analysis and optimization},
  author    = {Ogura, Masaki and Preciado, Victor M.},
  booktitle = {2017 American Control Conference ({ACC})},
  year      = {2017},
  pages     = {5001--5006},
}

@book{newman2010networks,
  title     = {Networks: An Introduction},
  author    = {Newman, M. E. J.},
  isbn      = {9780191594175},
  url       = {https://books.google.com/books?id=sgSlvgEACAAJ},
  year      = {2010},
  publisher = {Oxford University Press},
}

@article{K_Path_Edge,
  title     = {A novel measure of edge centrality in social networks},
  author    = {De Meo, Pasquale and Ferrara, Emilio and Fiumara, Giacomo and Ricciardello, Angela},
  journal   = {Knowledge-Based Systems},
  volume    = {30},
  pages     = {136--150},
  month     = jun,
  year      = {2012},
  publisher = {Elsevier {BV}},
  doi       = {10.1016/j.knosys.2012.01.007},
}

@article{Adapted_PageRank,
  title   = {An algorithm for ranking the nodes of an urban network based on the concept of {PageRank} vector},
  author  = {Agryzkov, Taras and Oliver, Jos{\'e} Luis and Tortosa, Leandro and Vicent, Jos{\'e}-Francisco},
  journal = {Applied Mathematics and Computation},
  year    = {2012},
  volume  = {219},
  pages   = {2186--2193},
}

@article{PageRank,
  title    = {The anatomy of a large-scale hypertextual {Web} search engine},
  author   = {Brin, Sergey and Page, Lawrence},
  journal  = {Computer Networks and ISDN Systems},
  volume   = {30},
  number   = {1},
  pages    = {107--117},
  year     = {1998},
  note     = {Proceedings of the Seventh International World Wide Web Conference},
  issn     = {0169-7552},
  doi      = {10.1016/S0169-7552(98)00110-X},
  url      = {https://www.sciencedirect.com/science/article/pii/S016975529800110X},
  keywords = {World Wide Web, Search engines, Information retrieval, PageRank, Google},
  abstract = {In this paper, we present Google, a prototype of a large-scale search engine which makes heavy use of the structure present in hypertext. Google is designed to crawl and index the Web efficiently and produce much more satisfying search results than existing systems. The prototype with a full text and hyperlink database of at least 24 million pages is available at http://google.stanford.edu/ To engineer a search engine is a challenging task. Search engines index tens to hundreds of millions of Web pages involving a comparable number of distinct terms. They answer tens of millions of queries every day. Despite the importance of large-scale search engines on the Web, very little academic research has been done on them. Furthermore, due to rapid advance in technology and Web proliferation, creating a Web search engine today is very different from three years ago. This paper provides an in-depth description of our large-scale Web search engine — the first such detailed public description we know of to date. Apart from the problems of scaling traditional search techniques to data of this magnitude, there are new technical challenges involved with using the additional information present in hypertext to produce better search results. This paper addresses this question of how to build a practical large-scale system which can exploit the additional information present in hypertext. Also we look at the problem of how to effectively deal with uncontrolled hypertext collections where anyone can publish anything they want.},
}

@article{PageRank_Survey,
  author    = {Berkhin, Pavel},
  title     = {A Survey on {PageRank} Computing},
  journal   = {Internet Mathematics},
  volume    = {2},
  number    = {1},
  pages     = {73--120},
  year      = {2005},
  publisher = {Taylor \& Francis},
  doi       = {10.1080/15427951.2005.10129098},
}

@inproceedings{dominance,
  author    = {Prosser, Reese T.},
  title     = {Applications of {Boolean} Matrices to the Analysis of Flow Diagrams},
  booktitle = {Papers Presented at the December 1-3, 1959, Eastern Joint IRE-AIEE-ACM Computer Conference},
  series    = {IRE-AIEE-ACM '59 (Eastern)},
  year      = {1959},
  pages     = {133--138},
  numpages  = {6},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Boston, Massachusetts},
  isbn      = {9781450378680},
  doi       = {10.1145/1460299.1460314},
  abstract  = {Any serious attempt at automatic programming of large-scale digital computing machines must provide for some sort of analysis of program structure. Questions concerning order of operations, location and disposition of transfers, identification of subroutines, internal consistency, redundancy and equivalence, all involve a knowledge of the structure of the program under study, and must be handled effectively by any automatic programming system.},
}

@article{10.1145/3491257,
  author    = {Li, Ming and Hawrylak, Peter and Hale, John},
  title     = {Strategies for Practical Hybrid Attack Graph Generation and Analysis},
  journal   = {Digital Threats},
  year      = {2021},
  month     = oct,
  issn      = {2692-1626},
  doi       = {10.1145/3491257},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  note      = {Just Accepted},
  keywords  = {attack graph, breadth-first search, cyber-physical system, high performance computing},
  abstract  = {As an analytical tool in cyber-security, an attack graph (AG) is capable of discovering multi-stage attack vectors on target computer networks. Cyber-physical systems (CPSs) comprise a special type of network that not only contains computing devices but also integrates components that operate in the continuous domain, such as sensors and actuators. Using AGs on CPSs requires that the system models and exploit patterns capture both token- and real-valued information. In this paper, we describe a hybrid AG model for security analysis of CPSs and computer networks. Specifically, we focus on two issues related to applying the model in practice: efficient hybrid AG generation and techniques for information extraction from them. To address the first issue, we present an accelerated hybrid AG generator that employs parallel programming and high performance computing (HPC). We conduct performance tests on CPU and GPU platforms to characterize the efficiency of our parallel algorithms. To address the second issue, we introduce an analytical regimen based on centrality analysis and apply it to a hybrid AG generated for a target CPS system to discover effective vulnerability remediation solutions.},
}