% CS-7863-Final/Report/Bibliography.bib

@misc{noauthor_parmetis_nodate,
title = {{ParMETIS} - {Parallel} {Graph} {Partitioning} and {Fill}-reducing {Matrix} {Ordering} {\textbar} {Karypis} {Lab}},
url = {http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview},
}
@misc{noauthor_overview_nodate,
title = {An {Overview} of the {Parallel} {Boost} {Graph} {Library} - 1.75.0},
author = {Edmonds, Nick and Gregor, Douglas and Lumsdaine, Andrew},
year = {2009},
url = {https://www.boost.org/doc/libs/1_75_0/libs/graph_parallel/doc/html/overview.html},
}
@phdthesis{nichols_2018,
title = {{Hybrid} {Attack} {Graphs} for {Use} with a {Simulation} of a {Cyber-Physical} {System}},
author = {Nichols, Will M.},
school = {The {University} of {Tulsa}},
year = {2018},
file = {Will_Nichols_Thesis_FINAL_VER:/home/noah/Zotero/storage/8AXSZXJN/Will_Nichols_Thesis_FINAL_VER.pdf:application/pdf},
}
@misc{noauthor_boost_nodate,
title = {The {Boost} {Graph} {Library}, vers. 1.75.0},
author = {Siek, Jeremy and Lee, Lie-Quan and Lumsdaine, Andrew},
url = {https://www.boost.org/doc/libs/1_75_0/libs/graph/doc/index.html},
}
@misc{Graphviz,
title = {{DOT} {Language}},
author = {{The Graphviz Authors}},
note = {https://graphviz.org/doc/info/lang.html}
}
@misc{noauthor_parallel_nodate,
title = {Parallel {BGL}: {The} {Parallel} {Boost} {Graph} {Library} - 1.75.0},
author = {Edmonds, Nick and Gregor, Douglas and Lumsdaine, Andrew},
url = {https://www.boost.org/doc/libs/1_75_0/libs/graph_parallel/doc/html/index.html},
}
@misc{noauthor_boost_nodate-1,
title = {Boost {Graph} {Library}: {Converting} {Existing} {Graphs} to {BGL} - 1.75.0},
url = {https://www.boost.org/doc/libs/1_75_0/libs/graph/doc/leda_conversion.html},
}
@misc{noauthor_graph_nodate,
title = {Graph {Partitioning} {\textbar} {Our} {Pattern} {Language}},
note = {https://patterns.eecs.berkeley.edu/?page_id=571},
}
@misc{CVE-2019-10747,
key = {CVE-2019-10747},
title = {{set-value is vulnerable to Prototype Pollution in versions lower than 3.0.1. The function mixin-deep could be tricked into adding or modifying properties of Object.prototype using any of the constructor, prototype and \_proto\_ payloads.}},
howpublished = {National Vulnerability Database},
institution = {NIST},
month = aug,
year = {2019},
url = {https://nvd.nist.gov/vuln/detail/CVE-2019-10747},
}
@article{abraham_predictive_2014,
title = {Predictive {Cyber} {Security} {Analytics} {Framework}: {A} {Non}-{Homogenous} {Markov} {Model} for {Security} {Quantification}},
doi = {10.5121/csit.2014.41316},
abstract = {Numerous security metrics have been proposed in the past for protecting computer networks. However we still lack effective techniques to accurately measure the predictive security risk of an enterprise taking into account the dynamic attributes associated with vulnerabilities that can change over time. In this paper we present a stochastic security framework for obtaining quantitative measures of security using attack graphs. Our model is novel as existing research in attack graph analysis do not consider the temporal aspects associated with the vulnerabilities, such as the availability of exploits and patches which can affect the overall network security based on how the vulnerabilities are interconnected and leveraged to compromise the system. Gaining a better understanding of the relationship between vulnerabilities and their lifecycle events can provide security practitioners a better understanding of their state of security. In order to have a more realistic representation of how the security state of the network would vary over time, a nonhomogeneous model is developed which incorporates a time dependent covariate, namely the vulnerability age. The daily transition-probability matrices are estimated using Frei's Vulnerability Lifecycle model. We also leverage the trusted CVSS metric domain to analyze how the total exploitability and impact measures evolve over a time period for a given network.},
number = {January},
author = {Abraham, Subil and Nair, Suku},
year = {2014},
pages = {195--209},
file = {Predictive_Cyber_Security_Analytics_Framework_A_No:/home/noah/Zotero/storage/YV8Y3CNR/Predictive_Cyber_Security_Analytics_Framework_A_No.pdf:application/pdf},
}
@article{ainsworth_graph_2016,
title = {Graph prefetching using data structure knowledge},
volume = {01-03-June},
isbn = {9781450343619},
doi = {10.1145/2925426.2926254},
abstract = {Searches on large graphs are heavily memory latency bound, as a result of many high latency DRAM accesses. Due to the highly irregular nature of the access patterns involved, caches and prefetchers, both hardware and software, perform poorly on graph workloads. This leads to CPU stalling for the majority of the time. However, in many cases the data access pattern is well defined and predictable in advance, many falling into a small set of simple patterns. Although existing implicit prefetchers cannot bring significant benefit, a prefetcher armed with knowledge of the data structures and access patterns could accurately anticipate applications' traversals to bring in the appropriate data. This paper presents a design of an explicitly configured prefetcher to improve performance for breadth-first searches and sequential iteration on the efficient and commonly-used compressed sparse row graph format. By snooping L1 cache accesses from the core and reacting to data returned from its own prefetches, the prefetcher can schedule timely loads of data in advance of the application needing it. For a range of applications and graph sizes, our prefetcher achieves average speedups of 2.3×, and up to 3.3×, with little impact on memory bandwidth requirements.},
journal = {Proceedings of the International Conference on Supercomputing},
author = {Ainsworth, Sam and Jones, Timothy M.},
year = {2016},
keywords = {Graphs, Prefetching},
file = {Graph Prefetching Using Data Structure Knowledge:/home/noah/Zotero/storage/UUVEP42L/Graph Prefetching Using Data Structure Knowledge.pdf:application/pdf},
}
@article{albanese_graphical_2018,
title = {A {Graphical} {Model} to {Assess} the {Impact} of {Multi}-{Step} {Attacks}},
volume = {15},
url = {http://journals.sagepub.com/doi/10.1177/1548512917706043},
doi = {10.1177/1548512917706043},
abstract = {In the last several decades, networked systems have grown in complexity and sophistication, introducing complex interdependencies amongst their numerous and diverse components. Attackers can leverage such interdependencies to penetrate seemingly well-guarded networks through sophisticated multi-step attacks. Research has shown that explicit and implicit interdependencies exist at various layers of the hardware and software architecture. In particular, dependencies between vulnerabilities and dependencies between applications and services are critical for assessing the impact of multi-step attacks. These two classes of interdependencies have been traditionally studied using attack and dependency graphs respectively. Although significant work has been done in the area of both attack and dependency graphs, we demonstrate that neither of these models can provide an accurate assessment of an attacks impact, when used in isolation. To address this limitation, we take a mission-centric approach and present a solution to integrate these two powerful models into a unified framework that enables us to accurately assess the impact of multi-step attacks and identify high-impact attack paths within a network. This analysis can ultimately generate effective hardening recommendations, and can be seen as one phase of a continuous process that iteratively cycles through impact analysis and vulnerability remediation stages.},
number = {1},
journal = {The Journal of Defense Modeling and Simulation: Applications, Methodology, Technology},
author = {Albanese, Massimiliano and Jajodia, Sushil},
month = jan,
year = {2018},
pages = {79--93},
}
@article{balaji_combining_2019,
title = {Combining data duplication and graph reordering to accelerate parallel graph processing},
isbn = {9781450366700},
doi = {10.1145/3307681.3326609},
abstract = {Performance of single-machine, shared memory graph processing is affected by expensive atomic updates and poor cache locality. Data duplication, a popular approach to eliminate atomic updates by creating thread-local copies of shared data, incurs extreme memory overheads due to the large sizes of typical input graphs. Even memory-efficient duplication strategies that exploit the power-law structure common to many graphs (by duplicating only the highly-connected "hub" vertices) suffer from overheads for having to dynamically identify the hub vertices. Degree Sorting, a popular graph reordering technique that re-assigns hub vertices consecutive IDs in a bid to improve spatial locality, is effective for single-threaded graph applications but suffers from increased false sharing in parallel executions. The main insight of this work is that the combination of data duplication and Degree Sorting eliminates the overheads of each optimization. Degree Sorting improves the efficiency of data duplication by assigning hub vertices consecutive IDs which enables easy identification of the hub vertices. Additionally, duplicating the hub vertex data eliminates false sharing in Degree Sorting since each thread updates its local copy of the hub vertex data. We evaluate this mutually-enabling combination of power-law-specific data duplication and Degree Sorting in a system called RADAR. RADAR improves performance by eliminating atomic updates for hub vertices and improving the cache locality of graph applications, providing speedups of up to 165x (1.88x on average) across different graph applications and input graphs.},
journal = {HPDC 2019- Proceedings of the 28th International Symposium on High-Performance Parallel and Distributed Computing},
author = {Balaji, Vignesh and Lucia, Brandon},
year = {2019},
keywords = {Atomics, Data duplication, Graph processing, Locality, Power-law},
pages = {133--144},
file = {Combining Data Duplication and Graph Reordering to Accelerate Parallel Graph Processing:/home/noah/Zotero/storage/4ET9RHBJ/Combining Data Duplication and Graph Reordering to Accelerate Parallel Graph Processing.pdf:application/pdf},
}
@article{berry_graph_2007,
title = {Graph {Analysis} with {High} {Performance} {Computing}.},
journal = {Computing in Science and Engineering},
author = {Berry, Jonathan and Hendrickson, Bruce},
year = {2007},
file = {Graph Analysis With High-Performance Computing:/home/noah/Zotero/storage/T84DCNCC/Graph Analysis With High-Performance Computing.pdf:application/pdf},
}
@article{besta_high-performance_2020,
title = {High-performance parallel graph coloring with strong guarantees on work, depth, and quality},
isbn = {9781728199986},
abstract = {We develop the first parallel graph coloring heuristics with strong theoretical guarantees on work and depth and coloring quality. The key idea is to design a relaxation of the vertex degeneracy order, a well-known graph theory concept, and to color vertices in the order dictated by this relaxation. This introduces a tunable amount of parallelism into the degeneracy ordering that is otherwise hard to parallelize. This simple idea enables significant benefits in several key aspects of graph coloring. For example, one of our algorithms ensures polylogarithmic depth and a bound on the number of used colors that is superior to all other parallelizable schemes, while maintaining work-efficiency. In addition to provable guarantees, the developed algorithms have competitive run-times for several real-world graphs, while almost always providing superior coloring quality. Our degeneracy ordering relaxation is of separate interest for algorithms outside the context of coloring.},
journal = {arXiv},
author = {Besta, Maciej and Carigiet, Armon and Vonarburg-Shmaria, Zur and Janda, Kacper and Gianinazzi, Lukas and Hoefler, Torsten},
year = {2020},
file = {High-Performance Parallel Graph Coloring with Strong Guarantees on Work, Depth, and Quality:/home/noah/Zotero/storage/RDNJ6UG5/High-Performance Parallel Graph Coloring with Strong Guarantees on Work, Depth, and Quality.pdf:application/pdf},
}
@article{chen_attack_2019,
title = {Attack intent analysis method based on attack path graph},
isbn = {9781450376624},
doi = {10.1145/3371676.3371680},
abstract = {With the rapid development of network technology, network security problems are gradually increasing, and the network attack situation is very severe. In a complex attack scenario, timely detection of potential attack behaviors and timely identification and pre-judgment of attack intentions are important components of security risks. However, the attack behavior in the network presents complexity, multi-step and uncertainty, which brings new technical challenges to attack intent analysis. Aiming at the problem that the attack intention of multi-step complex attack is difficult to identify, this paper proposes an attack intention analysis method based on attack path graph. Firstly, aiming at the multi-step complex attack behavior analysis problem, the key asset assessment technology is used to find out the key assets in the network system, and the hypothetical attack intention is generated according to the security protection requirements of the network system. Then, it is difficult to manually construct the attack path map in the large-scale network, and the automatic generation of the attack path map is realized. Finally, a method of network attack intent identification is proposed and a calculation method of attack intent probability is designed, which improves the efficiency and accuracy of attack intent recognition.},
journal = {ACM International Conference Proceeding Series},
author = {Chen, Biqiong and Liu, Yanhua and Li, Shijin and Gao, Xiaoling},
year = {2019},
keywords = {Attack Intention, Attack Path Graph, Key Asset, Vulnerability, Vulnerability Exploitation},
pages = {97--102},
file = {Attack Intent Analysis Method Based on Attack Path Graph:/home/noah/Zotero/storage/WDJ69WSH/Attack Intent Analysis Method Based on Attack Path Graph.pdf:application/pdf},
}
@phdthesis{cook_rage_2018,
title = {{RAGE}: {The} {Rage} {Attack} {Graph} {Engine}},
author = {Cook, Kyle},
school = {The {University} of {Tulsa}},
year = {2018},
file = {Kyle Cook Thesis:/home/noah/Zotero/storage/2SR28HM2/Kyle Cook Thesis.pdf:application/pdf},
}
@article{cook_scalable_2016,
title = {Scalable attack graph generation},
isbn = {9781450337526},
doi = {10.1145/2897795.2897821},
abstract = {Attack graphs are a powerful modeling technique with which to explore the attack surface of a system. However, they can be difficult to generate due to the exponential growth of the state space, often times making exhaustive search impractical. This paper discusses an approach for generating large attack graphs with an emphasis on scalable generation over a distributed system. First, a serial algorithm is presented, highlighting bottlenecks and opportunities to exploit inherent concurrency in the generation process. Then a strategy to parallelize this process is presented. Finally, we discuss plans for future work to implement the parallel algorithm using a hybrid distributed/shared memory programming model on a heterogeneous compute node cluster.},
journal = {Proceedings of the 11th Annual Cyber and Information Security Research Conference, CISRC 2016},
author = {Cook, Kyle and Shaw, Thomas and Hale, John and Hawrylak, Peter},
year = {2016},
keywords = {Attack graphs, Attack modeling, Vulnerability analysis},
file = {Attachment:/home/noah/Zotero/storage/2YNSLTQH/Scalable Attack Graph Generation:application/pdf},
}
@article{dai_fpgp_2016,
title = {{FPGP}: {Graph} processing framework on {FPGA}: {A} case study of breadth-first search},
isbn = {9781450338561},
doi = {10.1145/2847263.2847339},
abstract = {Large-scale graph processing is gaining increasing attentions in many domains. Meanwhile, FPGA provides a power-efficient and highly parallel platform for many applications, and has been applied to custom computing in many domains. In this paper, we describe FPGP (FPGA Graph Processing), a streamlined vertex-centric graph processing framework on FPGA, based on the interval-shard structure. FPGP is adaptable to different graph algorithms and users do not need to change the whole implementation on the FPGA. In our implementation, an on-chip parallel graph processor is proposed to both maximize the off-chip bandwidth of graph data and fully utilize the parallelism of graph processing. Meanwhile, we analyze the performance of FPGP and show the scalability of FPGP when the bandwidth of data path increases. FPGP is more power-efficient than single machine systems and scalable to larger graphs compared with other FPGA-based graph systems.},
journal = {FPGA 2016 - Proceedings of the 2016 ACM/SIGDA International Symposium on Field-Programmable Gate Arrays},
author = {Dai, Guohao and Chi, Yuze and Wang, Yu and Yang, Huazhong},
year = {2016},
keywords = {FPGA framework, Large scale graph processing},
pages = {105--110},
file = {FPGP\: Graph Processing Framework on FPGA:/home/noah/Zotero/storage/QJUQ3SDZ/FPGP Graph Processing Framework on FPGA.pdf:application/pdf},
}
@phdthesis{edmonds_active_2013,
title = {Active messages as a spanning model for parallel graph computation},
author = {Edmonds, N},
year = {2013},
}
@phdthesis{ming_diss,
title = {{A} {System} for {Attack} {Graph} {Generation} and {Analysis}},
author = {Li, Ming},
school = {The {University} of {Tulsa}},
year = {2021},
}
@article{edmonds_design_nodate,
title = {Design of a {Large}-{Scale} {Hybrid}-{Parallel} {Graph} {Library}},
journal = {International Conference for High Performance Computing, Student Research Symposium},
author = {Edmonds, N and Willcock, Jeremiah and Lumsdaine, Andrew and Hoefler, Torsten},
}
@article{edmonds_expressing_2013,
title = {Expressing graph algorithms using generalized active messages},
isbn = {9781450321303},
doi = {10.1145/2464996.2465441},
abstract = {Recently, graph computation has emerged as an important class of high-performance computing application whose characteristics differ markedly from those of traditional, compute-bound kernels. Libraries such as BLAS, LAPACK, and others have been successful in codifying best practices in numerical computing. The data-driven nature of graph applications necessitates a more complex application stack incorporating runtime optimization. In this paper, we present a method of phrasing graph algorithms as collections of asynchronous, concurrently executing, concise code fragments which may be invoked both locally and in remote address spaces. A runtime layer performs a number of dynamic optimizations, including message coalescing, message combining, and software routing. We identify a number of common patterns in these algorithms, and explore how this programming model can express those patterns. Algorithmic transformations are discussed which expose asynchrony that can be leveraged by the runtime to improve performance and reduce resource utilization. Practical implementations and performance results are provided for a number of representative algorithms. © 2013 ACM.},
journal = {Proceedings of the International Conference on Supercomputing},
author = {Edmonds, Nicholas and Willcock, Jeremiah and Lumsdaine, Andrew},
year = {2013},
keywords = {active messages, parallel graph algorithms, programming models},
pages = {283--292},
file = {Expressing graph algorithms using generalized active messages:/home/noah/Zotero/storage/XV4HM8JV/Expressing graph algorithms using generalized active messages.pdf:application/pdf},
}
@article{eyerman_many-core_2019,
title = {Many-core graph workload analysis},
isbn = {9781538683842},
doi = {10.1109/SC.2018.00025},
abstract = {Graph applications have specific characteristics that are not common in other application domains and therefore require thorough analysis to guide future graph processing hardware design. In this paper, we analyze multiple graph applications on current multi and many-core processors, and provide conclusions and recommendations for future designs. We restate well-known characteristics of graph applications, such as a low compute to memory ratio and irregular memory access patterns, but we also provide new important insights on executing graph applications on many-core processors. Our main novel observations are (i) some memory streams do show locality, while others show no locality, (ii) thread imbalance becomes a major problem with many threads, and (iii) many threads are required to saturate high-bandwidth memories. The first observation calls for a selective memory access policy, where accesses with locality are cached and prefetched, while accesses without locality can remain uncached to save cache capacity, and can fetch only one element from memory instead of a full cache line to save on memory bandwidth. The last two observations are contradicting: more threads are needed, but they are not used efficiently due to thread imbalance. Our recommendation is therefore to thoroughly revise the graph analysis algorithms to provide more scalable parallelism to be able to exploit the potential of many-core architectures with high-bandwidth memory. In addition, providing a few high-performance cores can speed up sections with low parallelism.},
journal = {Proceedings - International Conference for High Performance Computing, Networking, Storage, and Analysis, SC 2018},
author = {Eyerman, Stijn and Heirman, Wim and Du Bois, Kristof and Fryman, Joshua B. and Hur, Ibrahim},
year = {2019},
keywords = {Graph applications, Many-core processors, Workload analysis},
pages = {282--292},
file = {Many-Core Graph Workload Analysis:/home/noah/Zotero/storage/Y9QMRSQ7/Many-Core Graph Workload Analysis.pdf:application/pdf},
}
@article{for_p_2013,
title = {Predictive {Cyber} {Security} {Analytics} {Framework}: {A} {Non}-{Homogenous} {Markov} {Model} for {Security} {Quantification}},
author = {Abraham, Subil and Nair, Suku},
year = {2013},
file = {Attachment:/home/noah/Zotero/storage/ZLEYR33F/Predictive Cyber Security Analytics Framework:application/pdf},
}
@article{ghosh_planner-based_2012,
title = {A planner-based approach to generate and analyze minimal attack graph},
volume = {36},
doi = {10.1007/s10489-010-0266-8},
abstract = {In the present scenario, even well administered networks are susceptible to sophisticated cyber attacks. Such attack combines vulnerabilities existing on different systems/ services and are potentially more harmful than single point attacks. One of the methods for analyzing such security vulnerabilities in an enterprise network is the use of attack graph. It is a complete graph which gives a succinct representation of different attack scenarios, depicted by attack paths. An attack path is a logical succession of exploits, where each exploit in the series satisfies the preconditions for subsequent exploits and makes a causal relationship among them. Thus analysis of the attack graph may help in assessing network security from hackers' perspective. One of the intrinsic problems with the generation and analysis of such a complete attack graph is its scalability. In this work, an approach based on Planner, a special purpose search algorithm from artificial intelligence domain, has been proposed for time-efficient, scalable representation of the attack graphs. Further, customized algorithms have been developed for automatic generation of attack paths (using Planner as a low-level module). The analysis shows that generation of attack graph using the customized algorithms can be done in polynomial time. A case study has also been presented to demonstrate the efficacy of the proposed methodology. © Springer Science+Business Media, LLC 2010.},
number = {2},
journal = {Applied Intelligence},
author = {Ghosh, Nirnay and Ghosh, S. K.},
year = {2012},
keywords = {Attack graph, Attack path, Exploit, Network security, Planner},
pages = {369--390},
file = {Ghosh-Ghosh2012_Article_APlanner-basedApproachToGenera:/home/noah/Zotero/storage/2Q6YTVFE/Ghosh-Ghosh2012_Article_APlanner-basedApproachToGenera.pdf:application/pdf},
}
@article{guzzi_graph_2020,
title = {Graph analysis},
issn = {9781450321884},
doi = {10.1016/b978-0-12-819350-1.00009-8},
abstract = {The problem of efficiently analyzing graphs of various shapes and sizes has been recently enjoying an increased level of attention both in the academia and in the industry. This trend prompted creation of specialized graph databases that have been rapidly gaining popularity of late. In this paper we argue that there exist alternatives to graph databases, providing competitive or superior performance, that do not require replacement of the entire existing storage infrastructure by the companies wishing to deploy them. Copyright © 2013 ACM.},
journal = {Biological Network Analysis},
author = {Guzzi, Pietro Hiram and Roy, Swarup},
year = {2020},
pages = {25--51},
file = {Graph Analyysis Do we have to reinvent the wheel:/home/noah/Zotero/storage/LINFG68I/Graph Analyysis Do we have to reinvent the wheel.pdf:application/pdf},
}
@article{hamlet_dependency_2016,
title = {Dependency graph analysis and moving target defense selection},
isbn = {9781450345705},
doi = {10.1145/2995272.2995277},
abstract = {Moving target defense (MTD) is an emerging paradigm in which system defenses dynamically mutate in order to decrease the overall system attack surface. Though the concept is promising, implementations have not been widely adopted. The field has been actively researched for over ten years, and has only produced a small amount of extensively adopted defenses, most notably, address space layout randomization (ASLR). This is despite the fact that there currently exist a variety of moving target implementations and proofs-of-concept. We suspect that this results from the moving target controls breaking critical system dependencies from the perspectives of users and administrators, as well as making things more difficult for attackers. As a result, the impact of the controls on overall system security is not sufficient to overcome the inconvenience imposed on legitimate system users. In this paper, we analyze a successful MTD approach. We study the control's dependency graphs, showing how we use graph theoretic and network properties to predict the effectiveness of the selected control.},
journal = {MTD 2016 - Proceedings of the 2016 ACM Workshop on Moving Target Defense, co-located with CCS 2016},
author = {Hamlet, Jason R. and Lamb, Christopher C.},
year = {2016},
keywords = {Cybersecurity, Dynamic defense, Moving target defense},
pages = {105--116},
file = {Dependency graph analysis and moving target defense selection:/home/noah/Zotero/storage/TMCPNEJ8/Dependency graph analysis and moving target defense selection.pdf:application/pdf},
}
@article{hogan_graph_2013,
title = {Graph coarsening for path finding in cybersecurity graphs},
isbn = {9781450316873},
doi = {10.1145/2459976.2459984},
abstract = {A network hacking attack in which hackers repeatedly steal password hashes and move through a computer network with the goal of reaching a computer with high level administrative privileges is known as a pass-the-hash attack. In this paper we apply graph coarsening on graphs obtained from computer network data for the purpose of (a) detecting hackers using this attack and (b) assessing the risk level of the network's current state. We repeatedly contract edges (obtaining a graph minor), which preserves the existence of paths in the graph, and take powers of the adjacency matrix to count the paths. This allows us to detect the existence of paths as well as find paths that have high risk of being exploited by adversaries. Copyright 2012 ACM.},
journal = {ACM International Conference Proceeding Series},
author = {Hogan, Emilie and Johnson, John R. and Halappanavar, Mahantesh},
year = {2013},
file = {Graph Coarsening:/home/noah/Zotero/storage/NKSMCJP8/Graph Coarsening.pdf:application/pdf},
}
@article{hopcroft_algorithm_1973,
title = {Algorithm 447: {Efficient} algorithms for graph manipulation},
volume = {16},
doi = {10.1145/362248.362272},
abstract = {Efficient algorithms are presented for partitioning a graph into connected components, biconnected components and simple paths. The algorithm for partitioning of a graph into simple paths is iterative and each iteration produces a new path between two vertices already on paths. (The start vertex can be specified dynamically.) If V is the number of vertices and E is the number of edges, each algorithm requires time and space proportional to max (V, E) when executed on a random access computer. © 1973, ACM. All rights reserved.},
number = {6},
journal = {Communications of the ACM},
author = {Hopcroft, John and Tarjan, Robert},
year = {1973},
keywords = {analysis of algorithms, graph manipulation, graphs},
pages = {372--378},
file = {Algorithm 447\: efficient algorithms for graph manipulation:/home/noah/Zotero/storage/ZZBQJLL3/Algorithm 447 efficient algorithms for graph manipulation.pdf:application/pdf},
}
@misc{j_hale_compliance_nodate,
title = {Compliance {Method} for a {Cyber}-{Physical} {System}},
author = {Hale, J. and Hawrylak, P. and Papa, M.},
note = {U.S. Patent Number 9,471,789, Oct. 18, 2016.},
number = {9471789},
file = {Complaince_Graph_US_Patent_9471789:/home/noah/Zotero/storage/55BZN4U7/Complaince_Graph_US_Patent_9471789.pdf:application/pdf},
}
@book{jajodia_topological_2005,
address = {New York},
series = {Massive {Computing}},
title = {Topological {Analysis} of {Network} {Attack} {Vulnerability}},
volume = {5},
isbn = {0-387-24226-0},
url = {http://link.springer.com/10.1007/b104908},
publisher = {Springer-Verlag},
author = {Jajodia, S. and Noel, S. and O'Berry, B.},
editor = {Kumar, Vipin and Srivastava, Jaideep and Lazarevic, Aleksandar},
year = {2005},
doi = {10.1007/b104908},
}
@techreport{jha_minimization_2002,
title = {Minimization and reliability analyses of attack graphs},
number = {CMU-CS-02-109},
institution = {Carnegie Mellon University},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.61.1788&rep=rep1&type=pdf},
author = {Jha, S. and Wing, J. and Sheyner, O.},
year = {2002},
file = {CMU-CS-02-109:/home/noah/Zotero/storage/3UKICBJY/CMU-CS-02-109.pdf:application/pdf},
}
@article{jha_two_2002,
title = {Two formal analyses of attack graphs},
volume = {2002-January},
isbn = {0769516890},
doi = {10.1109/CSFW.2002.1021806},
abstract = {An attack graph is a succinct representation of all paths through a system that end in a state where an intruder has successfully achieved his goal. Today Red Teams determine the vulnerability of networked systems by drawing gigantic attack graphs by hand. Constructing attack graphs by hand is tedious, error-prone, and impractical for large systems. By viewing an attack as a violation of a safety property, we can use off-the-shelf model checking technology to produce attack graphs automatically: a successful path from the intruder's viewpoint is a counterexample produced by the model checker In this paper we present an algorithm for generating attack graphs using model checking as a subroutine. Security analysts use attack graphs for detection, defense and forensics. In this paper we present a minimization analysis technique that allows analysts to decide which minimal set of security measures would guarantee the safety of the system. We provide a formal characterization of this problem: we prove that it is polynomially equivalent to the minimum hitting set problem and we present a greedy algorithm with provable bounds. We also present a reliability analysis technique that allows analysts to perform a simple cost-benefit trade-off depending on the likelihoods of attacks. By interpreting attack graphs as Markov Decision Processes we can use the value iteration algorithm to compute the probabilities of intruder success for each attack the graph.},
journal = {Proceedings of the Computer Security Foundations Workshop},
author = {Jha, S. and Sheyner, O. and Wing, J.},
year = {2002},
keywords = {Computer science, Contracts, Forensics, Greedy algorithms, Intrusion detection, Performance analysis, Polynomials, Safety, Security, US Department of Defense},
pages = {49--63},
file = {jha-wing:/home/noah/Zotero/storage/3TQ6ZD38/jha-wing.pdf:application/pdf},
}
@article{kaynar_distributed_2016,
title = {Distributed {Attack} {Graph} {Generation}},
volume = {13},
doi = {10.1109/TDSC.2015.2423682},
abstract = {Attack graphs show possible paths that an attacker can use to intrude into a target network and gain privileges through series of vulnerability exploits. The computation of attack graphs suffers from the state explosion problem occurring most notably when the number of vulnerabilities in the target network grows large. Parallel computation of attack graphs can be utilized to attenuate this problem. When employed in online network security evaluation, the computation of attack graphs can be triggered with the correlated intrusion alerts received from sensors scattered throughout the target network. In such cases, distributed computation of attack graphs becomes valuable. This article introduces a parallel and distributed memory-based algorithm that builds vulnerability-based attack graphs on a distributed multi-agent platform. A virtual shared memory abstraction is proposed to be used over such a platform, whose memory pages are initialized by partitioning the network reachability information. We demonstrate the feasibility of parallel distributed computation of attack graphs and show that even a small degree of parallelism can effectively speed up the generation process as the problem size grows. We also introduce a rich attack template and network model in order to form chains of vulnerability exploits in attack graphs more precisely.},
number = {5},
journal = {IEEE Transactions on Dependable and Secure Computing},
author = {Kaynar, Kerem and Sivrikaya, Fikret},
year = {2016},
note = {Publisher: IEEE},
keywords = {Attack graph, distributed computing, exploit, reachability, vulnerability, weakness},
pages = {519--532},
file = {07087377:/home/noah/Zotero/storage/7VGU4BIA/07087377.pdf:application/pdf},
}
@article{kim_efficient_2018,
title = {Efficient parallel all-pairs shortest paths algorithm for complex graph analysis},
isbn = {9781450365239},
doi = {10.1145/3229710.3229730},
abstract = {The all-pairs shortest path problem is a classic problem to study characteristics of the given graphs. Though many efficient all-pairs shortest path algorithms have been published, it is still a very expensive computing task, especially with large graph datasets. In this paper, we propose an efficient parallel all-pairs shortest path algorithm based on Peng et al.'s fast sequential algorithm on shared-memory parallel environments to achieve faster and more efficient calculation for large-scale real-world networks. Peng et al.'s algorithm needs to sort vertices with respect to their degrees. However, it turns out the original algorithm uses less efficient sorting method, which is a significant portion of parallel overhead. Therefore, we also propose an efficient parallel method to sort data within a fixed range, in order to minimize the parallel overhead in our parallel algorithm. The optimized efficient sorting method can be used for general sorting purposes. Our experimental analysis shows that our proposed parallel algorithm achieves very high parallel speedup, even hyper-linear speedup, with real-world test datasets on two different shared-memory multi-core systems.},
journal = {ACM International Conference Proceeding Series},
author = {Kim, Jong Wook and Choi, Hyoeun and Bae, Seung Hee},
year = {2018},
keywords = {All-pairs shortest paths, Parallel algorithms, Shared-memory parallelism},
file = {(REWRITE ALG TO MAKE MORE PARALLEL)Efficient Parallel All-Pairs Shortest Paths Algorithm for Complex:/home/noah/Zotero/storage/NNKVMYX3/(REWRITE ALG TO MAKE MORE PARALLEL)Efficient Parallel All-Pairs Shortest Paths Algorithm for Complex.pdf:application/pdf},
}
@article{kotenko_attack_2006,
title = {Attack graph based evaluation of network security},
volume = {4237 LNCS},
isbn = {3540478205},
doi = {10.1007/11909033_20},
abstract = {The perspective directions in evaluating network security are simulating possible malefactor's actions, building the representation of these actions as attack graphs (trees, nets), the subsequent checking of various properties of these graphs, and determining security metrics which can explain possible ways to increase security level. The paper suggests a new approach to security evaluation based on comprehensive simulation of malefactor's actions, construction of attack graphs and computation of different security metrics. The approach is intended for using both at design and exploitation stages of computer networks. The implemented software system is described, and the examples of experiments for analysis of network security level are considered. © IFIP International Federation for Information Processing 2006.},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
author = {Kotenko, Igor and Stepashkin, Mikhail},
year = {2006},
keywords = {Network security, Network attacks, Risk assessment, Security metrics, Vulnerability assessment},
pages = {216--227},
file = {KotenkoS06:/home/noah/Zotero/storage/G6VN5RXV/KotenkoS06.pdf:application/pdf},
}
@article{li_combining_2019,
title = {Combining {OpenCL} and {MPI} to support heterogeneous computing on a cluster},
isbn = {9781450372275},
doi = {10.1145/3332186.3333059},
abstract = {This paper presents an implementation of a heterogeneous programming model which combines Open Computing Language (OpenCL) and Message Passing Interface (MPI). The model is applied to solving a Markov decision process (MDP) with value iteration method. The performance test is conducted on a high performance computing cluster. At peak performance, the model is able to achieve a 57X speedup over a serial implementation. For an extremely large input MDP, which has 1,000,000 states, the obtained speedup is still over 12X, showing that this heterogeneous programming model can solve MDPs more efficiently than the serial solver does.},
journal = {ACM International Conference Proceeding Series},
author = {Li, Ming and Hawrylak, Peter and Hale, John},
year = {2019},
keywords = {Heterogeneous computing, HPC, MDP, MPI, OpenCL, Parallelism},
file = {Combining OpenCL and MPI to Support Heterogeneous Computing on a Cluster:/home/noah/Zotero/storage/TXHCQ5S8/Combining OpenCL and MPI to Support Heterogeneous Computing on a Cluster.pdf:application/pdf},
}
@article{li_concurrency_2019,
title = {Concurrency {Strategies} for {Attack} {Graph} {Generation}},
isbn = {9781728120805},
doi = {10.1109/ICDIS.2019.00033},
abstract = {The network attack graph is a powerful tool for analyzing network security, but the generation of a large-scale graph is non-trivial. The main challenge is from the explosion of network state space, which greatly increases time and storage costs. In this paper, three parallel algorithms are proposed to generate scalable attack graphs. An OpenMP-based programming implementation is used to test their performance. Compared with the serial algorithm, the best performance from the proposed algorithms provides a 10X speedup.},
journal = {Proceedings - 2019 2nd International Conference on Data Intelligence and Security, ICDIS 2019},
author = {Li, Ming and Hawrylak, Peter and Hale, John},
year = {2019},
keywords = {Attack Graph, Multi-threaded Programming, Network Security, OpenMP},
pages = {174--179},
file = {Ming_LI_Thesis:/home/noah/Zotero/storage/CLSLS335/Ming_LI_Thesis.pdf:application/pdf},
}
@phdthesis{louthan_hybrid_2011,
title = {Hybrid {Attack} {Graphs} for {Modeling} {Cyber}-{Physical} {Systems}},
author = {Louthan, G},
school = {The {University} of {Tulsa}},
year = {2011},
file = {louthan_thesis:/home/noah/Zotero/storage/5SBCLYA3/louthan_thesis.pdf:application/pdf},
}
@article{louthan_hybrid_2014,
title = {Hybrid extensions for stateful attack graphs},
isbn = {9781450328128},
doi = {10.1145/2602087.2602106},
abstract = {Critical infrastructures and safety critical systems increasingly rely on the carefully orchestrated interactions between computers, networks and kinetic elements. The dominant formalisms for modeling such hybrid systems (those with discrete and continuous components) are geared towards simple reactive systems working in isolation. By contrast, modern cyber-physical systems depend on highly interconnected computational components and often function in potentially hostile environments. This paper describes linguistic and type extensions to the stateful attack graph, which models the functional nature of attacks on purely discrete information systems, to include continuous system elements and time evolution. The resulting formalism is called the hybrid attack graph, which captures an integrated view of the vulnerability space between information systems and a restricted but useful set of hybrid systems. Copyright is held by the owner/author(s).},
journal = {ACM International Conference Proceeding Series},
author = {Louthan, George and Haney, Michael and Hardwicke, Phoebe and Hawrylak, Peter and Hale, John},
year = {2014},
pages = {101--104},
file = {George Conf 2014:/home/noah/Zotero/storage/955QTCLX/George Conf 2014.pdf:application/pdf},
}
@article{louthan_toward_2011,
title = {Toward hybrid attack dependency graphs},
isbn = {9781450309455},
doi = {10.1145/2179298.2179368},
abstract = {This extended abstract presents a set of continuous-domain extensions to the attack graph, a formalism used to model the interactions of multiple exploits and assets in a network. These extensions result in a new modeling framework called the hybrid attack dependency graph, which provides the novel capability of modeling continuous state variables and their evolution over the execution of attacks with duration. Copyright 2011 ACM.},
journal = {ACM International Conference Proceeding Series},
author = {Louthan, George and Hardwicke, Phoebe and Hawrylak, Peter and Hale, John},
year = {2011},
file = {George Conf 2011:/home/noah/Zotero/storage/4VRAG9R8/George Conf 2011.pdf:application/pdf},
}
@article{lye_game_2005,
title = {Game strategies in network security},
volume = {4},
doi = {10.1007/s10207-004-0060-x},
abstract = {This paper presents a game-theoretic method for analyzing the security of computer networks. We view the interactions between an attacker and the administrator as a two-player stochastic game and construct a model for the game. Using a nonlinear program, we compute Nash equilibria or best-response strategies for the players (attacker and administrator). We then explain why the strategies are realistic and how administrators can use these results to enhance the security of their network. © Springer-Verlag 2005.},
number = {1-2},
journal = {International Journal of Information Security},
author = {Lye, Kong Wei and Wing, Jeannette M.},
year = {2005},
keywords = {Network security, Nonlinear programming, Stochastic games},
pages = {71--86},
file = {LyeWing05:/home/noah/Zotero/storage/UTVALGNI/LyeWing05.pdf:application/pdf},
}
@article{munoz-gonzalez_efficient_2017,
title = {Efficient attack graph analysis through approximate inference},
volume = {20},
doi = {10.1145/3105760},
abstract = {Attack graphs provide compact representations of the attack paths an attacker can follow to compromise network resources from the analysis of network vulnerabilities and topology. These representations are a powerful tool for security risk assessment. Bayesian inference on attack graphs enables the estimation of the risk of compromise to the system's components given their vulnerabilities and interconnections and accounts for multi-step attacks spreading through the system. While static analysis considers the risk posture at rest, dynamic analysis also accounts for evidence of compromise, for example, from Security Information and Event Management software or forensic investigation. However, in this context, exact Bayesian inference techniques do not scale well. In this article, we show how Loopy Belief Propagation-an approximate inference technique-can be applied to attack graphs and that it scales linearly in the number of nodes for both static and dynamic analysis, making such analyses viable for larger networks. We experiment with different topologies and network clustering on synthetic Bayesian attack graphs with thousands of nodes to show that the algorithm's accuracy is acceptable and that it converges to a stable solution. We compare sequential and parallel versions of Loopy Belief Propagation with exact inference techniques for both static and dynamic analysis, showing the advantages and gains of approximate inference techniques when scaling to larger attack graphs. 2017 Copyright is held by the owner/author(s).},
number = {3},
journal = {ACM Transactions on Privacy and Security},
author = {Muñoz-González, Luis and Sgandurra, Daniele and Paudice, Andrea and Lupu, Emil C.},
year = {2017},
keywords = {Approximate inference, Bayesian networks, Probabilistic graphical models},
file = {3105760:/home/noah/Zotero/storage/LLVTT2HE/3105760.pdf:application/pdf},
}
@inproceedings{natarajan_nsdminer_2012,
title = {{NSDMiner}: {Automated} discovery of {Network} {Service} {Dependencies}},
isbn = {978-1-4673-0775-8},
url = {http://ieeexplore.ieee.org/document/6195642/},
doi = {10.1109/INFCOM.2012.6195642},
booktitle = {2012 {Proceedings} {IEEE} {INFOCOM}},
publisher = {IEEE},
author = {Natarajan, Arun and Ning, Peng and Liu, Yao and Jajodia, Sushil and Hutchinson, Steve E.},
month = mar,
year = {2012},
pages = {2507--2515},
}
@inproceedings{noel_managing_2004,
address = {New York, New York, USA},
title = {Managing attack graph complexity through visual hierarchical aggregation},
isbn = {1-58113-974-8},
url = {http://portal.acm.org/citation.cfm?doid=1029208.1029225},
doi = {10.1145/1029208.1029225},
booktitle = {Proceedings of the 2004 {ACM} workshop on {Visualization} and data mining for computer security - {VizSEC}/{DMSEC} '04},
publisher = {ACM Press},
author = {Noel, Steven and Jajodia, Sushil},
year = {2004},
pages = {109--109},
}
@article{ou_scalable_2006,
title = {A {Scalable} {Approach} to {Attack} {Graph} {Generation}},
isbn = {1595935185},
author = {Ou, Xinming and Boyer, Wayne F. and McQueen, Miles A.},
year = {2006},
journal = {CCS '06: Proceedings of the 13th ACM conference on Computer and communications security},
keywords = {attack graphs, enterprise network security, logic-programming},
pages = {336--345},
file = {1180405.1180446:/home/noah/Zotero/storage/TJKHVC4R/1180405.1180446.pdf:application/pdf},
}
@article{phillips_graph-based_1998,
title = {A graph-based system for network-vulnerability analysis},
volume = {Part F1292},
isbn = {1581131682},
doi = {10.1145/310889.310919},
abstract = {This paper presents a graph-based approach to network vulnerability analysis. The method is flexible, allowing analysis of attacks from both outside and inside the network. It can analyze risks to a specific network asset, or examine the universe of possible consequences following a successful attack. The graph-based tool can identify the set of attack paths that have a high probability of success (or a low "effort" cost) for the attacker. The system could be used to test the effectiveness of making configuration changes, implementing an intrusion detection system, etc. The analysis system requires as input a database of common attacks, broken into atomic steps, specific network configuration and topology information, and an attacker profile. The attack information is "matched" with the network configuration information and an attacker profile to create a superset attack graph. Nodes identify a stage of attack, for example the class of machines the attacker has accessed and the user privilege level he or she has compromised. The arcs in the attack graph represent attacks or stages of attacks. By assigning probabilities of success on the arcs or costs representing level-of-effort for the attacker, various graph algorithms such as shortest-path algorithms can identify the attack paths with the highest probability of success.},
journal = {Proceedings New Security Paradigms Workshop},
author = {Phillips, Cynthia and Swiler, Laura Painton},
year = {1998},
keywords = {Attack graph, Computer security, Network vulnerability},
pages = {71--79},
file = {310889.310919:/home/noah/Zotero/storage/JMW5DI72/310889.310919.pdf:application/pdf},
}
@article{poolsappasit_dynamic_2012,
title = {Dynamic security risk management using {Bayesian} attack graphs},
volume = {9},
doi = {10.1109/TDSC.2011.34},
abstract = {Security risk assessment and mitigation are two vital processes that need to be executed to maintain a productive IT infrastructure. On one hand, models such as attack graphs and attack trees have been proposed to assess the cause-consequence relationships between various network states, while on the other hand, different decision problems have been explored to identify the minimum-cost hardening measures. However, these risk models do not help reason about the causal dependencies between network states. Further, the optimization formulations ignore the issue of resource availability while analyzing a risk model. In this paper, we propose a risk management framework using Bayesian networks that enable a system administrator to quantify the chances of network compromise at various levels. We show how to use this information to develop a security mitigation and management plan. In contrast to other similar models, this risk model lends itself to dynamic analysis during the deployed phase of the network. A multiobjective optimization platform provides the administrator with all trade-off information required to make decisions in a resource constrained environment. © 2011 IEEE.},
number = {1},
journal = {IEEE Transactions on Dependable and Secure Computing},
author = {Poolsappasit, Nayot and Dewri, Rinku and Ray, Indrajit},
year = {2012},
note = {Publisher: IEEE},
keywords = {attack graph, Bayesian belief networks, mitigation analysis, Security risk assessment},
pages = {61--74},
file = {05936075:/home/noah/Zotero/storage/CIZGS9CN/05936075.pdf:application/pdf},
}
@article{rehman_exploring_2020,
title = {Exploring accelerator and parallel graph algorithmic choices for temporal graphs},
isbn = {9781450375221},
doi = {10.1145/3380536.3380540},
abstract = {Many real-world systems utilize graphs that are time-varying in nature, where edges appear and disappear with respect to time. Moreover, the weights of different edges are also a function of time. Various conventional graph algorithms, such as single source shortest path (SSSP) have been developed for time-varying graphs. However, these algorithms are sequential in nature and their parallel counterparts are largely overlooked. On the other hand, parallel algorithms for static graphs are implemented as ordered and unordered variants. Unordered implementations do not enforce local or global order for processing tasks in parallel, but incur redundant task processing to converge their solutions. These implementations expose parallelism at the cost of high redundant work. Relax-ordered implementations maintain local order through per-core priority queues to reduce the amount of redundant work, while exposing parallelism. Finally, strict-ordered implementations achieve the work efficiency of sequential version by enforcing a global order at the expense of high thread synchronizations. These parallel implementations are adopted for temporal graphs to explore the choices that provide optimal performance on different parallel accelerators. This work shows that selecting the optimal parallel implementation extracts geometric performance gain of 46.38\% on Intel Xeon-40 core and 20.30\% on NVidia GTX-1080 GPU. It is also shown that optimal implementation choices for temporal graphs are not always the same as their respective static graphs.},
journal = {Proceedings of the 11th International Workshop on Programming Models and Applications for Multicores and Manycores, PMAM 2020},
author = {Rehman, Akif and Ahmad, Masab and Khan, Omer},
year = {2020},
keywords = {graph algorithms, multicores, performance scaling, static graphs, temporal graphs},
file = {Exploring accelerator and parallel graph:/home/noah/Zotero/storage/62AF6Z75/Exploring accelerator and parallel graph.pdf:application/pdf},
}
@article{ruifang_application_2019,
title = {Application of {Improved} {Dijkstra} {Algorithm} in {Two}-dimensional {Path} {Planning} {Problem}},
isbn = {9781450361910},
doi = {10.1145/3378065.3378106},
abstract = {The Dijkstra algorithm is a typical single-source shortest path algorithm for calculating the shortest path from one node to the other in a non-negative weight map, but its use of the roulette method greatly affects the node selection. Speed and efficiency. Therefore, on the basis of ensuring the search accuracy, this paper improves the initial Dijkstra algorithm to improve the efficiency of the algorithm and meet its needs in 2D or 3D path planning. In this paper, MATLAB2018a is used as the experimental platform to simulate the initial Dijkstra algorithm and the improved Dijkstra algorithm. The experimental results show that the improved algorithm greatly reduces the path planning vision and improves the operating efficiency.},
journal = {ACM International Conference Proceeding Series},
author = {Ruifang, Zhang and Tianyi, Ji and Haitao, Zheng},
year = {2019},
keywords = {Dijkstra algorithm, Greedy Algorithm, Shortest Path Planning},
pages = {211--215},
file = {Application of Improved Dijkstra Algorithm in:/home/noah/Zotero/storage/T5RF74IL/Application of Improved Dijkstra Algorithm in.pdf:application/pdf},
}
@article{schneier_modeling_1999,
title = {Modeling {Security} {Threats}},
url = {https://www.schneier.com/academic/archives/1999/12/attack_trees.html},
author = {Schneier, Bruce},
year = {1999},
journal = {Dr. Dobb's Journal},
volume = {24},
number = {12},
}
@article{sheyner_automated_2002,
title = {Automated {Generation} and {Analysis} of {Attack} {Graphs}},
issn = {9781787284395},
journal = {Proceedings of the 2002 IEEE Symposium on Security and Privacy},
author = {Sheyner, O. and Haines, J. and Jha, S. and Lippmann, R. and Wing, J.},
year = {2002},
pages = {254--265},
file = {sheyner-wing02:/home/noah/Zotero/storage/BV6NHT6L/sheyner-wing02.pdf:application/pdf},
}
@article{slota_scalable_2019,
title = {Scalable generation of graphs for benchmarking {HPC} community-detection algorithms},
isbn = {9781450362290},
doi = {10.1145/3295500.3356206},
abstract = {Community detection in graphs is a canonical social network analysis method. We consider the problem of generating suites of terascale synthetic social networks to compare the solution quality of parallel community-detection methods. The standard method, based on the graph generator of Lancichinetti, Fortunato, and Radicchi (LFR), has been used extensively for modest-scale graphs, but has inherent scalability limitations. We provide an alternative, based on the scalable Block Two-Level Erdos-Renyi (BTER) graph generator, that enables HPC-scale evaluation of solution quality in the style of LFR. Our approach varies community coherence, and retains other important properties. Our methods can scale real-world networks, e.g., to create a version of the Friendster network that is 512 times larger. With BTER's inherent scalability, we can generate a 15-terabyte graph (4.6B vertices, 925B edges) in just over one minute. We demonstrate our capability by showing that label-propagation community-detection algorithm can be strong-scaled with negligible solution-quality loss.},
journal = {International Conference for High Performance Computing, Networking, Storage and Analysis, SC},
author = {Slota, George M. and Berry, Jonathan W. and Hammond, Simon D. and Olivier, Stephen L. and Phillips, Cynthia A. and Rajamanickam, Sivasankaran},
year = {2019},
file = {Scalable Generation of Graphs for Benchmarking HPC Community-:/home/noah/Zotero/storage/3TNXNCHL/Scalable Generation of Graphs for Benchmarking HPC Community-.pdf:application/pdf},
}
@inproceedings{swiler_computer-attack_2001,
address = {Anaheim, CA},
title = {Computer-attack graph generation tool},
volume = {2},
isbn = {0-7695-1212-7},
url = {http://ieeexplore.ieee.org/document/932182/},
doi = {10.1109/DISCEX.2001.932182},
booktitle = {Proceedings {DARPA} {Information} {Survivability} {Conference} and {Exposition} {II}. {DISCEX}'01},
publisher = {IEEE Comput. Soc},
author = {Swiler, L.P. and Phillips, Cynthia and Ellis, D. and Chakerian, S.},
year = {2001},
pages = {307--321},
}
@article{tang_graph_2016,
title = {Graph {Stream} {Summarization}},
	isbn = {9781450335317},
doi = {10.1145/2882903.2915223},
	abstract = {A graph stream, which refers to the graph with edges being updated sequentially in a form of a stream, has important applications in cyber security and social networks. Due to the sheer volume and highly dynamic nature of graph streams, the practical way of handling them is by summarization. Given a graph stream G, directed or undirected, the problem of graph stream summarization is to summarize G as SG with a much smaller (sublinear) space, linear construction time and constant maintenance cost for each edge update, such that SG allows many queries over G to be approximately conducted efficiently. The widely used practice of summarizing data streams is to treat each stream element independently by e.g., hash- or sample-based methods, without maintaining the connections (or relationships) between elements. Hence, existing methods can only solve ad-hoc problems, without supporting diversified and complicated analytics over graph streams. We present TCM, a novel graph stream summary. Given an incoming edge, it summarizes both node and edge information in constant time. Consequently, the summary forms a graphical sketch where edges capture the connections inside elements, and nodes maintain relationships across elements. We discuss a wide range of supported queries and establish some error bounds. In addition, we experimentally show that TCM can effectively and efficiently support analytics over graph streams beyond the power of existing sketches, which demonstrates its potential to start a new line of research and applications in graph stream management.},
author = {Tang, Nan and Chen, Qing and Mitra, Prasenjit},
year = {2016},
	keywords = {data streams, graph streams, sketch, summarization},
pages = {1481--1496},
file = {Graph Stream Summarization:/home/noah/Zotero/storage/PDWAJUHM/Graph Stream Summarization.pdf:application/pdf},
}
@phdthesis{west_critical_2019,
title = {Critical {Digital} {Asset} {Identification} and {Attack} {Graph} {State} {Estimation} for {Nuclear} {Research} {Reactors}},
	author = {West, J. C.},
year = {2019},
file = {Codi_West_Masters_Thesis_4_18_19_Final_Library_Ver:/home/noah/Zotero/storage/Z9YJEZQL/Codi_West_Masters_Thesis_4_18_19_Final_Library_Ver.pdf:application/pdf},
}
@article{yan_exploiting_2014,
title = {Exploiting fine-grained parallelism in graph traversal algorithms via lock virtualization on multi-core architecture},
volume = {69},
url = {http://link.springer.com/10.1007/s11227-014-1239-1},
doi = {10.1007/s11227-014-1239-1},
number = {3},
journal = {The Journal of Supercomputing},
author = {Yan, Jie and Tan, Guangming and Sun, Ninghui},
month = sep,
year = {2014},
pages = {1462--1490},
}
@article{yang_high_2018,
title = {High performance graph analytics with productivity on hybrid {CPU}-{GPU} platforms},
	isbn = {9781450363372},
doi = {10.1145/3195612.3195614},
abstract = {In recent years, the rapid-growing scales of graphs have sparked a lot of parallel graph analysis frameworks to leverage the massive hardware resources on CPUs or GPUs. Existing CPU implementations are time-consuming, while GPU implementations are restricted by the memory space and the complexity of programming. In this paper, we present a high performance hybrid CPU-GPU parallel graph analytics framework with good productivity based on GraphMat. We map vertex programs to generalized sparse matrix vector multiplication on GPUs to deliver high performance, and propose a high-level abstraction for developers to implement various graph algorithms with relatively little efforts. Meanwhile, several optimizations have been adopted for reducing the communication cost and leveraging hardware resources, especially the memory hierarchy. We evaluate the proposed framework on three graph primitives (PageRank, BFS and SSSP) with large-scale graphs. The experimental results show that, our implementation achieves an average speedup of 7.0X than GraphMat on two 6-core Intel Xeon CPUs. It also has the capability to process larger datasets but achieves comparable performance than MapGraph, a state-of-the-art GPU-based framework.},
journal = {ACM International Conference Proceeding Series},
author = {Yang, Haoduo and Su, Huayou and Lan, Qiang and Wen, Mei and Zhang, Chunyuan},
year = {2018},
keywords = {Graph analytics, Hybrid CPU-GPU, Parallel computing},
pages = {17--21},
file = {High Performance Graph Analytics with Productivity:/home/noah/Zotero/storage/H2PP3724/High Performance Graph Analytics with Productivity.pdf:application/pdf},
}
@article{yao_efficient_2018,
title = {An efficient graph accelerator with parallel data conflict management},
	isbn = {9781450359863},
doi = {10.1145/3243176.3243201},
	abstract = {Graph-specific computing with the support of dedicated accelerator has greatly boosted the graph processing in both efficiency and energy. Nevertheless, their data conflict management is still sequential when certain vertex needs a large number of conflicting updates at the same time, leading to prohibitive performance degradation. This is particularly true and serious for processing natural graphs. In this paper, we have the insight that the atomic operations for the vertex updating of many graph algorithms (e.g., BFS, PageRank, and WCC) are typically incremental and simplex. This hence allows us to parallelize the conflicting vertex updates in an accumulative manner. We architect AccuGraph, a novel graph-specific accelerator that can simultaneously process atomic vertex updates for massive parallelism while ensuring the correctness. A parallel accumulator is designed to remove the serialization in atomic protections for conflicting vertex updates through merging their results in parallel. Our implementation on Xilinx FPGA with a wide variety of typical graph algorithms shows that our accelerator achieves an average throughput by 2.36 GTEPS as well as up to 3.14x performance speedup in comparison with state-of-the-art ForeGraph (with its single-chip version).},
journal = {Parallel Architectures and Compilation Techniques - Conference Proceedings, PACT},
author = {Yao, Pengcheng and Zheng, Long and Liao, Xiaofei and Jin, Hai and He, Bingsheng},
year = {2018},
file = {An efficient graph accelerator with parallel data conflict management:/home/noah/Zotero/storage/NMA7DQ5B/An efficient graph accelerator with parallel data conflict management.pdf:application/pdf},
}
@article{zeng_cyber_2017,
title = {Cyber {Attack} {Analysis} {Based} on {Markov} {Process} {Model}},
author = {Zeng, Keming},
year = {2017},
file = {keming_thesis:/home/noah/Zotero/storage/LQY2YWSR/keming_thesis.pdf:application/pdf},
}
@article{zhang_boosting_2017,
title = {Boosting the performance of {FPGA}-based graph processor using hybrid memory cube: {A} case for breadth first search},
	isbn = {9781450343541},
doi = {10.1145/3020078.3021737},
abstract = {Large graph processing has gained great attention in recent years due to its broad applicability from machine learning to social science. Large real-world graphs, however, are inherently difficult to process efficiently, not only due to their large memory footprint, but also that most graph algorithms entail memory access patterns with poor locality and a low compute-to-memory access ratio. In this work, we leverage the exceptional random access performance of emerging Hybrid Memory Cube (HMC) technology that stacks multiple DRAM dies on top of a logic layer, combined with the flexibility and efficiency of FPGA to address these challenges. To our best knowledge, this is the first work that implements a graph processing system on a FPGA-HMC platform based on software/hardware co-design and co-optimization. We first present the modifications of algorithm and a platform-aware graph processing architecture to perform level-synchronized breadth first search (BFS) on FPGA-HMC platform. To gain better insights into the potential bottlenecks of proposed implementation, we develop an analytical performance model to quantitatively evaluate the HMC access latency and corresponding BFS performance. Based on the analysis, we propose a two-level bitmap scheme to further reduce memory access and perform optimization on key design parameters (e.g. memory access granularity). Finally, we evaluate the performance of our BFS implementation using the AC-510 development kit from Micron. We achieved 166 million edges traversed per second (MTEPS) using GRAPH500 benchmark on a random graph with a scale of 25 and an edge factor of 16, which significantly outperforms CPU and other FPGA-based large graph processors.},
journal = {FPGA 2017 - Proceedings of the 2017 ACM/SIGDA International Symposium on Field-Programmable Gate Arrays},
author = {Zhang, Jialiang and Khoram, Soroosh and Li, Jing},
year = {2017},
pages = {207--216},
file = {Boosting the Performance of FPGA-based Graph Processor using Hybrdi Memory Cube:/home/noah/Zotero/storage/CDKPUXYF/Boosting the Performance of FPGA-based Graph Processor using Hybrdi Memory Cube.pdf:application/pdf},
}
@book{pacheco_introduction_2011,
edition = {Print},
title = {An {Introduction} to {Parallel} {Programming}},
isbn = {978-0-12-374260-5},
publisher = {Morgan Kaufmann},
author = {Pacheco, Peter},
year = {2011}
}
@article{jost_comparing_nodate,
title = {Comparing the {OpenMP}, {MPI}, and {Hybrid} {Programming} {Paradigms} on an {SMP} {Cluster}},
abstract = {Clusters of SMP (Symmetric Multi-Processors) nodes provide support for a wide range of parallel programming paradigms. The shared address space within each node is suitable for OpenMP parallelization. Message passing can be employed within and across the nodes of a cluster. Multiple levels of parallelism can be achieved by combining message passing and OpenMP parallelization. Which programming paradigm is the best will depend on the nature of the given problem, the hardware components of the cluster, the network, and the available software. In this study we compare the performance of different implementations of the same Computational Fluid Dynamics (CFD) benchmark application, using the same numerical algorithm but employing different programming paradigms.},
language = {en},
author = {Jost, Gabriele and Jin, Haoqiang},
pages = {10},
file = {Jost and Jin - Comparing the OpenMP, MPI, and Hybrid Programming .pdf:/home/noah/Zotero/storage/BF3EDZSK/Jost and Jin - Comparing the OpenMP, MPI, and Hybrid Programming .pdf:application/pdf},
}
@article{doekemeijer_survey_nodate,
title = {A {Survey} of {Parallel} {Graph} {Processing} {Frameworks}},
abstract = {As graph analysis tasks see a significant growth in complexity - as exposed by recent advances in complex networks analysis, information retrieval and data mining, and even logistics - the productivity of deploying such complex graph processing applications becomes a significant bottleneck. Therefore, many programming paradigms, models, frameworks - graph processing systems all together - have been proposed to tackle this challenge. In the same time, many data collections have exploded in size, posing huge performance problems. Modern graph processing systems strive to find the best balance between simple, user-friendly and productivity-enhancing front-ends and high-performance back-ends for the analyses they enable.},
language = {en},
author = {Doekemeijer, Niels},
pages = {30},
file = {Doekemeijer - A Survey of Parallel Graph Processing Frameworks.pdf:/home/noah/Zotero/storage/PFXNZFXH/Doekemeijer - A Survey of Parallel Graph Processing Frameworks.pdf:application/pdf},
}
@inproceedings{malewicz_pregel_2010,
title = {Pregel: a system for large-scale graph processing},
	doi = {10.1145/1807167.1807184},
	booktitle = {Proceedings of the 2010 {ACM} {SIGMOD} {International} {Conference} on {Management} of {Data}},
	abstract = {Many practical computing problems concern large graphs. Standard examples include the Web graph and various social networks. The scale of these graphs—in some cases billions of vertices, trillions of edges—poses challenges to their efficient processing. In this paper we present a computational model suitable for this task. Programs are expressed as a sequence of iterations, in each of which a vertex can receive messages sent in the previous iteration, send messages to other vertices, and modify its own state and that of its outgoing edges or mutate graph topology. This vertex-centric approach is flexible enough to express a broad set of algorithms. The model has been designed for efficient, scalable and fault-tolerant implementation on clusters of thousands of commodity computers, and its implied synchronicity makes reasoning about programs easier. Distribution-related details are hidden behind an abstract API. The result is a framework for processing large graphs that is expressive and easy to program.},
	language = {en},
	author = {Malewicz, Grzegorz and Austern, Matthew H. and Bik, Aart J. C. and Dehnert, James C. and Horn, Ilan and Leiser, Naty and Czajkowski, Grzegorz},
month = jun,
year = {2010},
	pages = {135--146},
file = {Malewicz et al. - Pregel a system for large-scale graph processing.pdf:/home/noah/Zotero/storage/R8S2PMJU/Malewicz et al. - Pregel a system for large-scale graph processing.pdf:application/pdf},
}
@phdthesis{kalavri_performance_2016,
address = {Stockholm, Sweden},
type = {{PhD}},
title = {Performance {Optimization} {Techniques} and {Tools} for {Distributed} {Graph} {Processing}},
school = {KTH Royal Institute of Technology},
author = {Kalavri, Vasiliki},
year = {2016},
}
@article{ammar_experimental_2018,
title = {Experimental {Analysis} of {Distributed} {Graph} {Systems}},
volume = {11},
doi = {10.14778/3231751.3231764},
abstract = {This paper evaluates eight parallel graph processing systems: Hadoop, HaLoop, Vertica, Giraph, GraphLab (PowerGraph), Blogel, Flink Gelly, and GraphX (SPARK) over four very large datasets (Twitter, World Road Network, UK 200705, and ClueWeb) using four workloads (PageRank, WCC, SSSP and K-hop). The main objective is to perform an independent scale-out study by experimentally analyzing the performance, usability, and scalability (using up to 128 machines) of these systems. In addition to performance results, we discuss our experiences in using these systems and suggest some system tuning heuristics that lead to better performance.},
number = {10},
urldate = {2021-04-02},
journal = {Proceedings of the VLDB Endowment},
	author = {Ammar, Khaled and Özsu, M. Tamer},
month = jun,
year = {2018},
keywords = {Computer Science - Distributed, Parallel, and Cluster Computing},
annote = {Comment: Volume 11 of Proc. VLDB Endowment},
file = {arXiv Fulltext PDF:/home/noah/Zotero/storage/QJA73MYR/Ammar and Ozsu - 2018 - Experimental Analysis of Distributed Graph Systems.pdf:application/pdf;arXiv.org Snapshot:/home/noah/Zotero/storage/TTUFSAHW/1806.html:text/html},
}
@article{mccune_thinking_2015,
title = {Thinking {Like} a {Vertex}: {A} {Survey} of {Vertex}-{Centric} {Frameworks} for {Large}-{Scale} {Distributed} {Graph} {Processing}},
volume = {48},
doi = {10.1145/2818185},
number = {2},
journal = {ACM Computing Surveys},
author = {McCune, Robert and Weninger, Tim and Madey, Greg},
year = {2015},
}
@inproceedings{dimov_pass--hash_2017,
title = {Pass-the-{Hash}: {One} of the {Most} {Prevalent} {Yet} {Underrated} {Attacks} for {Credentials} {Theft} and {Reuse}},
doi = {10.1145/3134302.3134338},
booktitle = {18th {International} {Conference} on {Computer} {Systems} and {Technologies}},
author = {Dimov, Dimo and Tzonev, Yulian},
year = {2017},
pages = {149--154},
}
@inproceedings{baloyi_guidelines_2019,
address = {Skukuza South Africa},
title = {Guidelines for {Data} {Privacy} {Compliance}: {A} {Focus} on {Cyberphysical} {Systems} and {Internet} of {Things}},
doi = {10.1145/3351108.3351143},
booktitle = {{SAICSIT} '19: {Proceedings} of the {South} {African} {Institute} of {Computer} {Scientists} and {Information} {Technologists} 2019},
publisher = {Association for Computing Machinery},
author = {Baloyi, Ntsako and Kotzé, Paula},
year = {2019},
}
@article{allman_complying_2006,
title = {Complying with {Compliance}: {Blowing} it off is not an option.},
volume = {4},
number = {7},
journal = {ACM Queue},
author = {Allman, Eric},
year = {2006},
}
@article{kalavri_shortest_2016,
title = {The shortest path is not always a straight line: leveraging semi-metricity in graph analysis},
volume = {9},
doi = {10.14778/2947618.2947623},
number = {9},
journal = {Proceedings of the VLDB Endowment},
author = {Kalavri, Vasiliki and Simas, Tiago and Logothetis, Dionysios},
year = {2016},
}
@inproceedings{schneck_survey_1973,
title = {A survey of compiler optimization techniques},
doi = {10.1145/800192.805690},
booktitle = {{ACM} '73: {Proceedings} of the {ACM} annual conference},
author = {Schneck, Paul},
month = aug,
year = {1973},
pages = {106--113},
annote = {old paper but gives good background info},
file = {Schneck - 1973 - A survey of compiler optimization techniques.pdf:/home/noah/Zotero/storage/U5BLY5DQ/Schneck - 1973 - A survey of compiler optimization techniques.pdf:application/pdf},
}
@inproceedings{haneda_optimizing_2005,
title = {Optimizing {General} {Purpose} {Compiler} {Optimization}},
doi = {10.1145/1062261.1062293},
booktitle = {{CF} '05: {Proceedings} of the 2nd conference on {Computing} frontiers},
author = {Haneda, M and Knijnenburg, P.M.W. and Wijshoff, H.A.G.},
month = may,
year = {2005},
pages = {180--188},
}
@article{zhang_reducing_2004,
title = {Reducing instruction cache energy consumption using a compiler-based strategy},
volume = {1},
doi = {10.1145/980152.980154},
number = {1},
journal = {ACM Transactions on Architecture and Code Optimization},
author = {Zhang, W. and Hu, J.S. and Degalahal, V. and Vijaykrishnan, N. and Irwin, M.J.},
month = mar,
year = {2004},
pages = {3--33},
annote = {ilp work, "loop fission (distribution)"},
file = {Zhang et al. - 2004 - Reducing instruction cache energy consumption usin.pdf:/home/noah/Zotero/storage/Z8G4I2GJ/Zhang et al. - 2004 - Reducing instruction cache energy consumption usin.pdf:application/pdf},
}
@inproceedings{psarris_impact_2003,
title = {The impact of data dependence analysis on compilation and program parallelization},
doi = {10.1145/782814.782843},
booktitle = {{ICS} '03: {Proceedings} of the 17th annual international conference on {Supercomputing}},
author = {Psarris, Kleanthis and Kyriakopoulos, Konstantinos},
month = jun,
year = {2003},
pages = {205--214},
annote = {good info on dependence analysis},
file = {Psarris and Kyriakopoulos - 2003 - The impact of data dependence analysis on compilat.pdf:/home/noah/Zotero/storage/5FV3AAPY/Psarris and Kyriakopoulos - 2003 - The impact of data dependence analysis on compilat.pdf:application/pdf},
}
@article{cui_layout-oblivious_2013,
title = {Layout-oblivious compiler optimization for matrix computations},
volume = {9},
doi = {10.1145/2400682.2400694},
number = {4},
journal = {ACM Transactions on Architecture and Code Optimization},
author = {Cui, Huimin and Yi, Qing and Xue, Jingling and Feng, Xiaobing},
month = jan,
year = {2013},
annote = {updated info on dependency analysis for cache optimization (matrix) section},
file = {Cui et al. - 2013 - Layout-oblivious compiler optimization for matrix .pdf:/home/noah/Zotero/storage/4XRLDYQA/Cui et al. - 2013 - Layout-oblivious compiler optimization for matrix .pdf:application/pdf},
}
@misc{amarasinghe_compiler_2020,
title = {Compiler 2.0: {Using} {Machine} {Learning} to {Modernize} {Compiler} {Technology}},
author = {Amarasinghe, Saman},
month = jun,
year = {2020},
file = {Amarasinghe - 2020 - Compiler 2.0 Using Machine Learning to Modernize .pdf:/home/noah/Zotero/storage/D8MF2UZR/Amarasinghe - 2020 - Compiler 2.0 Using Machine Learning to Modernize .pdf:application/pdf},
}
@article{mccandless_compiler_2012,
title = {Compiler techniques to improve dynamic branch prediction for indirect jump and call instructions},
volume = {8},
doi = {10.1145/2086696.2086703},
number = {4},
journal = {ACM Transactions on Architecture and Code Optimization},
	author = {McCandless, Jason and Gregg, David},
month = jan,
year = {2012},
annote = {NOP Insertion, Reordering, Hybrid. Has pics},
file = {Mccandless and Gregg - 2012 - Compiler techniques to improve dynamic branch pred.pdf:/home/noah/Zotero/storage/LSNLMMDB/Mccandless and Gregg - 2012 - Compiler techniques to improve dynamic branch pred.pdf:application/pdf},
}
@inproceedings{tan_energy-efficient_2012,
	address = {Dresden, Germany},
	title = {Energy-efficient branch prediction with compiler-guided history stack},
	booktitle = {{DATE} '12: {Proceedings} of the {Conference} on {Design}, {Automation} and {Test} in {Europe}},
author = {Tan, Mingxing and Liu, Xianhua and Xie, Zichao and Tong, Dong and Cheng, Xu},
month = mar,
year = {2012},
pages = {449--454},
annote = {compiler-guided history stack},
file = {Tan et al. - 2012 - Energy-efficient branch prediction with compiler-g.pdf:/home/noah/Zotero/storage/BMXL7SX7/Tan et al. - 2012 - Energy-efficient branch prediction with compiler-g.pdf:application/pdf},
}
@inproceedings{leopoldseder_fast-path_2018,
title = {Fast-path loop unrolling of non-counted loops to enable subsequent compiler optimizations},
doi = {10.1145/3237009.3237013},
booktitle = {{ManLang} '18: {Proceedings} of the 15th {International} {Conference} on {Managed} {Languages} \& {Runtimes}},
author = {Leopoldseder, David and Schatz, Roland and Stadler, Lukas and Rigger, Manuel and Würthinger, Thomas and Mössenböck, Hanspeter},
month = sep,
year = {2018},
pages = {1--13},
annote = {Loop unrolling},
file = {Leopoldseder et al. - 2018 - Fast-path loop unrolling of non-counted loops to e.pdf:/home/noah/Zotero/storage/A6PM49XV/Leopoldseder et al. - 2018 - Fast-path loop unrolling of non-counted loops to e.pdf:application/pdf},
}
@article{bacon_compiler_1994,
title = {Compiler transformations for high-performance computing},
volume = {26},
doi = {10.1145/197405.197406},
number = {4},
journal = {ACM Computing Surveys},
author = {Bacon, David F. and Graham, Susan L. and Sharp, Oliver J.},
month = dec,
year = {1994},
annote = {Old predictive heuristics},
file = {Bacon et al. - 1994 - Compiler transformations for high-performance comp.pdf:/home/noah/Zotero/storage/DQ4XXYVL/Bacon et al. - 1994 - Compiler transformations for high-performance comp.pdf:application/pdf},
}
@inproceedings{granston_automatic_2001,
title = {Automatic {Recommendation} of {Compiler} {Options}},
booktitle = {Proceedings 4th {Feedback} {Directed} {Optimization} {Workshop}},
author = {Granston, Elana and Holler, Anne},
month = dec,
year = {2001},
annote = {predictive heuristics},
file = {Granston and Holler - 2001 - Automatic Recommendation of Compiler Options.pdf:/home/noah/Zotero/storage/YCWJYSNK/Granston and Holler - 2001 - Automatic Recommendation of Compiler Options.pdf:application/pdf},
}
@inproceedings{ashouri_predictive_2016,
title = {Predictive modeling methodology for compiler phase-ordering},
doi = {10.1145/2872421.2872424},
booktitle = {{PARMA}-{DITAM} '16: {Proceedings} of the 7th {Workshop} on {Parallel} {Programming} and {Run}-{Time} {Management} for {Many}-core {Architectures} and the 5th {Workshop} on {Design} {Tools} and {Architectures} for {Multicore} {Embedded} {Computing} {Platforms}},
author = {Ashouri, Amir Hossein and Bignoli, Andrea and Palermo, Gianluca and Silvano, Cristina},
month = jan,
year = {2016},
pages = {7--12},
annote = {predictive modeling},
file = {Ashouri et al. - 2016 - Predictive modeling methodology for compiler phase.pdf:/home/noah/Zotero/storage/APLJJCKR/Ashouri et al. - 2016 - Predictive modeling methodology for compiler phase.pdf:application/pdf},
}
@inproceedings{robison_impact_2001,
title = {Impact of economics on compiler optimization},
doi = {10.1145/376656.376751},
booktitle = {{JGI} '01: {Proceedings} of the 2001 joint {ACM}-{ISCOPE} conference on {Java} {Grande}},
author = {Robison, Arch D.},
month = jun,
year = {2001},
pages = {1--10},
file = {Robison - 2001 - Impact of economics on compiler optimization.pdf:/home/noah/Zotero/storage/3HYQ4P7B/Robison - 2001 - Impact of economics on compiler optimization.pdf:application/pdf},
}
@inproceedings{zhai_compiler_2008,
	address = {Salt Lake City, Utah, USA},
title = {Compiler optimizations for parallelizing general-purpose applications under thread-level speculation},
doi = {10.1145/1345206.1345251},
booktitle = {{PPoPP} '08: {Proceedings} of the 13th {ACM} {SIGPLAN} {Symposium} on {Principles} and practice of parallel programming},
author = {Zhai, Antonia and Wang, Shengyue and Yew, Pen-Chung and He, Guojin},
month = feb,
year = {2008},
pages = {271--272},
annote = {Thread-level speculation},
file = {Zhai et al. - 2008 - Compiler optimizations for parallelizing general-p.pdf:/home/noah/Zotero/storage/KZJS8TNL/Zhai et al. - 2008 - Compiler optimizations for parallelizing general-p.pdf:application/pdf},
}
@misc{cornuejols_tutorial_nodate,
title = {A {Tutorial} on {Integer} {Programming}},
url = {http://www.math.clemson.edu/~mjs/courses/mthsc.440/integer.pdf},
language = {en},
author = {Cornuejols, Gerard and Trick, Michael A and Saltzman, Matthew J},
file = {Cornuejols et al. - A Tutorial on Integer Programming.pdf:/home/noah/Zotero/storage/ZAJELIQH/Cornuejols et al. - A Tutorial on Integer Programming.pdf:application/pdf},
}
@inproceedings{triantafyllis_compiler_2003,
title = {Compiler optimization-space exploration},
booktitle = {{CGO} '03: {Proceedings} of the {International} {Symposium} on {Code} {Generation} and {Optimization}: {Feedback}-directed and {Runtime} {Optimization}},
	author = {Triantafyllis, Spyridon and Vachharajani, Manish and Vachharajani, Neil and August, David I.},
month = mar,
year = {2003},
pages = {204--215},
	annote = {OSE},
file = {Haneda et al. - 2005 - Optimizing general purpose compiler optimization.pdf:/home/noah/Zotero/storage/X7DPHGAV/Haneda et al. - 2005 - Optimizing general purpose compiler optimization.pdf:application/pdf},
}
@article{whitfield_approach_1997,
title = {An approach for exploring code improving transformations},
volume = {19},
journal = {ACM Transactions on Programming Languages and Systems},
	author = {Whitfield, D. L. and Soffa, M. L.},
month = nov,
year = {1997},
pages = {1053--1084},
}
@inproceedings{august_framework_1997,
title = {A framework for balancing control flow and predication},
booktitle = {International {Symposium} on {Microarchitecture}},
author = {August, David I. and Hwu, W. W. and Mahlke, S. A.},
year = {1997},
pages = {92--103},
}
@inproceedings{porter_creating_2009,
title = {Creating artificial global history to improve branch prediction accuracy},
	booktitle = {{ICS} '09: {Proceedings} of the 23rd {International} {Conference} on {Supercomputing}},
author = {Porter, L and Tullsen, D. M.},
year = {2009},
pages = {266--275},
}
@book{hennessy_computer_2019,
address = {Cambridge, MA, USA},
edition = {6th},
series = {Computer {Architecture} and {Design}},
title = {Computer {Architecture}: {A} {Quantitative} {Approach}},
isbn = {978-0-12-811905-1},
publisher = {Morgan Kaufmann},
author = {Hennessy, John L. and Patterson, David A.},
year = {2019},
}
@book{aho_compilers_2007,
edition = {2nd},
title = {Compilers: {Principles}, {Techniques}, \& {Tools}},
isbn = {0-321-48681-1},
publisher = {Pearson Education, Inc},
author = {Aho, Alfred V. and Lam, Monica S. and Sethi, Ravi and Ullman, Jeffrey D.},
year = {2007},
file = {Alfred V. Aho, Monica S. Lam, Ravi Sethi, Jeffrey D. Ullman - Compilers_ principles, techniques, & tools-Pearson_Addison Wesley (2007)(1).djvu:/home/noah/Zotero/storage/DF545IKP/Alfred V. Aho, Monica S. Lam, Ravi Sethi, Jeffrey D. Ullman - Compilers_ principles, techniques, & tools-Pearson_Addison Wesley (2007)(1).djvu:image/vnd.djvu},
}
@inproceedings{calder_reducing_1994,
address = {San Jose, California, USA},
title = {Reducing {Branch} {Costs} via {Branch} {Alignment}},
doi = {10.1145/381792.195553},
booktitle = {6th {International} {Conference} on {Architectural} {Support} for {Programming} {Languages} and {Operating} {Systems}},
author = {Calder, Brad and Grunwald, Dirk},
month = oct,
year = {1994},
}
@inproceedings{stock_framework_2014,
	title = {A framework for enhancing data reuse via associative reordering},
booktitle = {Proceedings of the 35th {ACM} {SIGPLAN} {Conference} on {Programming} {Language} {Design} and {Implementation}},
	author = {Stock, Kevin and Kong, Martin and Grosser, Tobias and Pouchet, Louis-Noël and Rastello, Fabrice and Ramanujam, J. and Sadayappan, P.},
month = jun,
year = {2014},
}
@article{malali_survey_2019,
title = {A {Survey} of {Compiler} {Optimization} {Techniques}},
volume = {2},
issn = {2581-5792},
number = {5},
journal = {International Journal of Research in Engineering, Science and Management},
author = {Malali, Aman Raghu and Pramod, Ananya and Wadhwa, Jugal and Alex, Sini Anna},
month = may,
year = {2019},
}
@inproceedings{merigoux_modern_2021,
title = {A modern compiler for the {French} tax code},
doi = {10.5281/zenodo.4456774},
booktitle = {Proceedings of the 30th {ACM} {SIGPLAN} {International} {Conference} on {Compiler} {Construction}},
author = {Merigoux, Denis and Monat, Raphaël and Protzenko, Jonathan},
month = mar,
year = {2021},
}
@inproceedings{arifuzzaman_fast_2015,
title = {Fast parallel conversion of edge list to adjacency list for large-scale graphs},
booktitle = {{HPC} '15: {Proceedings} of the {Symposium} on {High} {Performance} {Computing}},
author = {Arifuzzaman, Shaikh and Khan, Maleq},
month = apr,
year = {2015},
pages = {17--24},
}
@inproceedings{coppersmith_matrix_1987,
title = {Matrix multiplication via arithmetic progressions},
booktitle = {Proceedings of the 19th {Annual} {ACM} {Symposium} on {Theory} of {Computing}},
author = {Coppersmith, D. and Winograd, S.},
year = {1987},
pages = {1--6},
}
@article{alon_finding_1997,
title = {Finding and counting given length cycles},
volume = {17},
journal = {Algorithmica},
author = {Alon, N. and Yuster, R. and Zwick, U.},
year = {1997},
pages = {209--223},
}
@inproceedings{yu_construction_2018,
title = {The {Construction} of {Large} {Graph} {Data} {Structures} in a {Scalable} {Distributed} {Message} {System}},
doi = {10.1145/3234664.3234682},
booktitle = {{HPCCT} 2018: {Proceedings} of the 2018 2nd {High} {Performance} {Computing} and {Cluster} {Technologies} {Conference}},
author = {Yu, Xinjie and Chen, Wentao and Miao, Jiajia and Chen, Jian and Mao, Handong and Luo, Qiong and Gu, Lin},
month = jun,
year = {2018},
pages = {6--10},
}
@inproceedings{liakos_memory-optimized_2016,
title = {Memory-{Optimized} {Distributed} {Graph} {Processing} through {Novel} {Compression} {Techniques}},
doi = {10.1145/2983323.2983687},
booktitle = {{CIKM} '16: {Proceedings} of the 25th {ACM} {International} {Conference} on {Information} and {Knowledge} {Management}},
author = {Liakos, Panagiotis and Papakonstantinopoulou, Katia and Delis, Alex},
month = oct,
year = {2016},
pages = {2317--2322},
}
@misc{noauthor_parallel_nodate-1,
title = {Parallel {BGL} {Distributed} {Adjacency} {List} - 1.73.0},
url = {https://www.boost.org/doc/libs/1_73_0/libs/graph_parallel/doc/html/distributed_adjacency_list.html},
urldate = {2021-04-11},
file = {Parallel BGL Distributed Adjacency List - 1.73.0:/home/noah/Zotero/storage/CXKARHSV/distributed_adjacency_list.html:text/html},
}
@misc{noauthor_property_nodate,
title = {Property {Maps} {\textbar} {The} {Boost} {Graph} {Library} {\textbar} {InformIT}},
url = {https://www.informit.com/articles/article.aspx?p=25777&seqNum=6},
urldate = {2021-04-11},
file = {Property Maps | The Boost Graph Library | InformIT:/home/noah/Zotero/storage/3QX7UTM5/article.html:text/html},
}
@misc{noauthor_parallel_nodate-2,
title = {Parallel {BGL} {Distributed} {Property} {Map} - 1.64.0},
url = {https://www.boost.org/doc/libs/1_64_0/libs/graph_parallel/doc/html/distributed_property_map.html},
urldate = {2021-04-11},
file = {Parallel BGL Distributed Property Map - 1.64.0:/home/noah/Zotero/storage/WHMADIX2/distributed_property_map.html:text/html},
}
@misc{noauthor_parallel_nodate-3,
title = {Parallel {BGL} {Distributed} {Property} {Map} - 1.64.0},
url = {https://www.boost.org/doc/libs/1_64_0/libs/graph_parallel/doc/html/distributed_property_map.html},
urldate = {2021-04-11},
file = {Parallel BGL Distributed Property Map - 1.64.0:/home/noah/Zotero/storage/N9I5LKFK/distributed_property_map.html:text/html},
}
@inproceedings{balaji_graph_2016,
title = {Graph {Topology} {Abstraction} for {Distributed} {Path} {Queries}},
doi = {10.1145/2915516.2915520},
booktitle = {{HPGP} '16: {Proceedings} of the {ACM} {Workshop} on {High} {Performance} {Graph} {Processing}},
author = {Balaji, Janani and Sunderraman, Rajshekhar},
month = may,
year = {2016},
pages = {27--34},
}
@inproceedings{besta_loggraph_2018,
title = {Log(graph): a near-optimal high-performance graph representation},
booktitle = {{PACT} '18: {Proceedings} of the 27th {International} {Conference} on {Parallel} {Architectures} and {Compilation} {Techniques}},
author = {Besta, Maciej and Stanojevic, Dimitri and Zivic, Tijana and Singh, Jagpreet and Hoerold, Maurice and Hoefler, Torsten},
month = nov,
year = {2018},
pages = {1--13},
}
@inproceedings{fredj_cybersecurity_2020,
title = {{CyberSecurity} {Attack} {Prediction}: {A} {Deep} {Learning} {Approach}},
doi = {10.1145/3433174.3433614},
booktitle = {{SIN} 2020: 13th {International} {Conference} on {Security} of {Information} and {Networks}},
author = {Fredj, Ouissem Ben and Mihoub, Alaeddine and Krichen, Moez and Cheikhrouhou, Omar and Derhab, Abdelouahid},
month = nov,
year = {2020},
pages = {1--6},
}
@inproceedings{yanakiev_governance_2020,
title = {Governance {Model} of a {Cybersecurity} {Network}: {Best} {Practices} in the {Academic} {Literature}},
doi = {10.1145/3407982.3407992},
booktitle = {{CompSysTech} '20: {Proceedings} of the 21st {International} {Conference} on {Computer} {Systems} and {Technologies} '20},
author = {Yanakiev, Yantsislav and Tagarev, Todor},
month = jun,
year = {2020},
pages = {27--34},
}
@inproceedings{heman_awang_mangut_arp_2015,
address = {Helsinki, Finland},
title = {{ARP} {Cache} {Poisoning} {Mitigation} and {Forensics} {Investigation}},
isbn = {978-1-4673-7952-6},
doi = {10.1109/Trustcom.2015.536},
booktitle = {2015 {IEEE} {Trustcom}/{BigDataSE}/{ISPA}},
	author = {Mangut, Heman Awang and Al-Nemrat, Ameer and Benzaïd, Chafika and Tawil, Abdel-Rahman H.},
month = aug,
year = {2015},
}
@misc{noauthor_what_nodate,
title = {What is {ARP} {Poisoning}? {\textbar} {Security} {Wiki}},
shorttitle = {What is {ARP} {Poisoning}?},
url = {https://doubleoctopus.com/security-wiki/threats-and-tools/address-resolution-protocol-poisoning/},
abstract = {Address Resolution Protocol (ARP) poisoning is when an attacker sends falsified ARP messages...},
urldate = {2021-05-05},
journal = {Secret Double Octopus},
file = {Snapshot:/home/noah/Zotero/storage/4GX2JKXM/address-resolution-protocol-poisoning.html:text/html},
}
@misc{eugene_wallingford_brief_2017,
title = {A {Brief} {History} of {Compilers}: {Why} {We} {Are} {Here}},
url = {http://faculty.chas.uni.edu/~wallingf/teaching/cs4550/readings/01-history.html},
urldate = {2021-05-10},
	author = {Wallingford, Eugene},
month = aug,
year = {2017},
file = {CS 4550 Reading\: A Brief History of Compilers:/home/noah/Zotero/storage/96JB3B8N/01-history.html:text/html},
}
@inproceedings{hui_guan_wootz_2019,
title = {Wootz: a compiler-based framework for fast {CNN} pruning via composability},
	doi = {10.1145/3314221.3314652},
booktitle = {Proceedings of the 40th {ACM} {SIGPLAN} {Conference} on {Programming} {Language} {Design} and {Implementation}},
	author = {Guan, Hui and Shen, Xipeng and Lim, Seung-Hwan},
month = jun,
year = {2019},
pages = {717--730},
}
@inproceedings{kateryna_muts_compiler-based_2020,
title = {Compiler-based {WCET} prediction performing function specialization},
doi = {10.1145/3378678.3391879},
	booktitle = {{SCOPES} '20: {Proceedings} of the 23rd {International} {Workshop} on {Software} and {Compilers} for {Embedded} {Systems}},
	author = {Muts, Kateryna and Falk, Heiko},
month = may,
year = {2020},
pages = {32--35},
}
@inproceedings{roshan_dathathri_eva_2020,
title = {{EVA}: an encrypted vector arithmetic language and compiler for efficient homomorphic computation},
doi = {10.1145/3385412.3386023},
booktitle = {{PLDI} 2020: {Proceedings} of the 41st {ACM} {SIGPLAN} {Conference} on {Programming} {Language} {Design} and {Implementation}},
	author = {Dathathri, Roshan and Kostova, Blagovesta and Saarikivi, Olli and Dai, Wei and Laine, Kim},
month = jun,
year = {2020},
pages = {546--561},
}
@inproceedings{yu_ji_bridge_2018,
title = {Bridge the {Gap} between {Neural} {Networks} and {Neuromorphic} {Hardware} with a {Neural} {Network} {Compiler}},
doi = {10.1145/3173162.3173205},
booktitle = {{ASPLOS} '18: {Proceedings} of the {Twenty}-{Third} {International} {Conference} on {Architectural} {Support} for {Programming} {Languages} and {Operating} {Systems}},
	author = {Ji, Yu and Zhang, Youhui and Chen, Wenguang and Xie, Yuan},
month = mar,
year = {2018},
pages = {448--460},
}
@inproceedings{9150145,
	title = {Implementing an {Attack} {Graph} {Generator} in {CUDA}},
	doi = {10.1109/IPDPSW50202.2020.00128},
	booktitle = {2020 {IEEE} {International} {Parallel} and {Distributed} {Processing} {Symposium} {Workshops} ({IPDPSW})},
	author = {Li, Ming and Hawrylak, Peter J. and Hale, John},
	year = {2020},
	pages = {730--738},
}
@article{7087377,
	title = {Distributed {Attack} {Graph} {Generation}},
	volume = {13},
	doi = {10.1109/TDSC.2015.2423682},
	number = {5},
	journal = {IEEE Transactions on Dependable and Secure Computing},
	author = {Kaynar, Kerem and Sivrikaya, Fikret},
	year = {2016},
	pages = {519--532},
}
@misc{lawrence_livermore_national_laboratory_mpip_nodate,
title = {mpi{P}, A light-weight {MPI} profiler},
shorttitle = {A light-weight {MPI} profiler.},
note = {https://software.llnl.gov/mpiP/},
author = {{Lawrence Livermore National Laboratory}},
}
@misc{noauthor_sarbanes-oxley_2002,
title = {Sarbanes-{Oxley} {Act} of 2002},
note = {Pub. L. No. 107-204. 2002 [Online]. Available: https://www.govinfo.gov/content/pkg/PLAW-107publ204/html/PLAW-107publ204.htm},
}
@misc{noauthor_health_1996,
title = {Health {Insurance} {Portability} and {Accountability} {Act} of 1996},
note = {Pub. L. No. 104-191. 1996 [Online]. Available: https://www.govinfo.gov/content/pkg/PLAW-104publ191/html/PLAW-104publ191.htm},
}
@misc{EUdataregulations2018,
	title = {Regulation ({EU}) 2016/679 of the {European} {Parliament} and of the {Council} of 27 {April} 2016 ({General} {Data} {Protection} {Regulation})},
note = {Available: https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:32016R0679},
	author = {{The European Parliament and the Council of the European Union}},
date = {2016-04-27}
}
@misc{PCI,
title = {Payment {Card} {Industry} {(PCI)} {Data} {Security} {Standard}},
note = {{Available: https://www.pcisecuritystandards.org/documents/PCI$\_$DSS$\_$v3-2-1.pdf}},
month = may,
year = {2018},
author = {PCI Security Standards Council}
}
@article{centrality_causal,
title = {Node centrality measures are a poor substitute for causal inference},
volume = {9},
	pages = {6846},
doi = {10.1038/s41598-019-43033-9},
journal = {Scientific Reports},
author = {Dablander, Fabian and Hinne, Max},
year = {2019},
}
@inproceedings{Mieghem2018DirectedGA,
	title = {Directed graphs and mysterious complex eigenvalues},
	author = {Van Mieghem, Piet},
	year = {2018}
}
@article{Guo2017HermitianAM,
	title = {Hermitian {Adjacency} {Matrix} of {Digraphs} and {Mixed} {Graphs}},
	author = {Guo, Krystal and Mohar, Bojan},
	journal = {Journal of Graph Theory},
	year = {2017},
	volume = {85}
}
@article{Brualdi2010SpectraOD,
	title = {Spectra of digraphs},
	author = {Brualdi, Richard A.},
	journal = {Linear Algebra and its Applications},
	year = {2010},
	volume = {432},
	pages = {2181--2213}
}
@article{PMID:30064421,
	title = {A systematic survey of centrality measures for protein-protein interaction networks},
	author = {Ashtiani, Minoo and Salehzadeh-Yazdi, Ali and Razaghi-Moghadam, Zahra and Hennig, Holger and Wolkenhauer, Olaf and Mirzaie, Mehdi and Jafari, Mohieddin},
	doi = {10.1186/s12918-018-0598-2},
	number = {1},
	volume = {12},
	month = jul,
	year = {2018},
	journal = {BMC Systems Biology},
	issn = {1752-0509},
	pages = {80},
	abstract = {Background: Numerous centrality measures have been introduced to identify "central" nodes in large networks. The availability of a wide range of measures for ranking influential nodes leaves the user to decide which measure may best suit the analysis of a given network. The choice of a suitable measure is furthermore complicated by the impact of the network topology on ranking influential nodes by centrality measures. To approach this problem systematically, we examined the centrality profile of nodes of yeast protein-protein interaction networks (PPINs) in order to detect which centrality measure is succeeding in predicting influential proteins. We studied how different topological network features are reflected in a large set of commonly used centrality measures. Results: We used yeast PPINs to compare 27 common of centrality measures. The measures characterize and assort influential nodes of the networks. We applied principal component analysis (PCA) and hierarchical clustering and found that the most informative measures depend on the network's topology. Interestingly, some measures had a high level of contribution in comparison to others in all PPINs, namely Latora closeness, Decay, Lin, Freeman closeness, Diffusion, Residual closeness and Average distance centralities. Conclusions: The choice of a suitable set of centrality measures is crucial for inferring important functional properties of a network. We concluded that undertaking data reduction using unsupervised machine learning methods helps to choose appropriate variables (centrality measures). Hence, we proposed identifying the contribution proportions of the centrality measures with PCA as a prerequisite step of network analysis before inferring functional consequences, e.g., essentiality of a node.},
	url = {https://europepmc.org/articles/PMC6069823},
}
@article{Katz,
	title = {A new status index derived from sociometric analysis},
	author = {Katz, Leo},
	journal = {Psychometrika},
	year = {1953},
	volume = {18},
	number = {1},
	pages = {39--43},
	month = mar,
	doi = {10.1007/BF02289026},
url={https://ideas.repec.org/a/spr/psycho/v18y1953i1p39-43.html}
}
@article{ModKatz,
	title = {Katz centrality of {Markovian} temporal networks: {Analysis} and optimization},
	author = {Ogura, Masaki and Preciado, Victor M.},
	journal = {2017 American Control Conference (ACC)},
	year = {2017},
	pages = {5001--5006}
}
@book{newman2010networks,
title={Networks: An Introduction},
author={Newman, M.E.J.},
isbn={9780191594175},
url={https://books.google.com/books?id=sgSlvgEACAAJ},
year={2010},
publisher={Oxford University Press}
}
@article{K_Path_Edge,
doi = {10.1016/j.knosys.2012.01.007},
url = {https://doi.org/10.1016%2Fj.knosys.2012.01.007},
	year = {2012},
	month = jun,
publisher = {Elsevier {BV}},
volume = {30},
pages = {136--150},
	author = {De Meo, Pasquale and Ferrara, Emilio and Fiumara, Giacomo and Ricciardello, Angela},
title = {A novel measure of edge centrality in social networks},
journal = {Knowledge-Based Systems}
}
@article{Adapted_PageRank,
title={An algorithm for ranking the nodes of an urban network based on the concept of PageRank vector},
	author = {Agryzkov, Taras and Oliver, Jos{\'e} Luis and Tortosa, Leandro and Vicent, Jos{\'e}-Francisco},
	journal = {Applied Mathematics and Computation},
	year = {2012},
	volume = {219},
	pages = {2186--2193}
}
@article{PageRank,
title = {The anatomy of a large-scale hypertextual Web search engine},
journal = {Computer Networks and ISDN Systems},
volume = {30},
number = {1},
	pages = {107--117},
year = {1998},
note = {Proceedings of the Seventh International World Wide Web Conference},
issn = {0169-7552},
doi = {https://doi.org/10.1016/S0169-7552(98)00110-X},
url = {https://www.sciencedirect.com/science/article/pii/S016975529800110X},
	author = {Brin, Sergey and Page, Lawrence},
keywords = {World Wide Web, Search engines, Information retrieval, PageRank, Google},
abstract = {In this paper, we present Google, a prototype of a large-scale search engine which makes heavy use of the structure present in hypertext. Google is designed to crawl and index the Web efficiently and produce much more satisfying search results than existing systems. The prototype with a full text and hyperlink database of at least 24 million pages is available at http://google.stanford.edu/ To engineer a search engine is a challenging task. Search engines index tens to hundreds of millions of Web pages involving a comparable number of distinct terms. They answer tens of millions of queries every day. Despite the importance of large-scale search engines on the Web, very little academic research has been done on them. Furthermore, due to rapid advance in technology and Web proliferation, creating a Web search engine today is very different from three years ago. This paper provides an in-depth description of our large-scale Web search engine — the first such detailed public description we know of to date. Apart from the problems of scaling traditional search techniques to data of this magnitude, there are new technical challenges involved with using the additional information present in hypertext to produce better search results. This paper addresses this question of how to build a practical large-scale system which can exploit the additional information present in hypertext. Also we look at the problem of how to effectively deal with uncontrolled hypertext collections where anyone can publish anything they want.}
}
@article{PageRank_Survey,
	author = {Berkhin, Pavel},
title = {A Survey on PageRank Computing},
journal = {Internet Mathematics},
volume = {2},
number = {1},
	pages = {73--120},
year = {2005},
	publisher = {Taylor \& Francis},
doi = {10.1080/15427951.2005.10129098},
URL = {https://doi.org/10.1080/15427951.2005.10129098},
eprint = {https://doi.org/10.1080/15427951.2005.10129098}
}
@inproceedings{dominance,
author = {Prosser, Reese T.},
title = {Applications of Boolean Matrices to the Analysis of Flow Diagrams},
year = {1959},
isbn = {9781450378680},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/1460299.1460314},
doi = {10.1145/1460299.1460314},
abstract = {Any serious attempt at automatic programming of large-scale digital computing machines must provide for some sort of analysis of program structure. Questions concerning order of operations, location and disposition of transfers, identification of subroutines, internal consistency, redundancy and equivalence, all involve a knowledge of the structure of the program under study, and must be handled effectively by any automatic programming system.},
booktitle = {Papers Presented at the December 1-3, 1959, Eastern Joint IRE-AIEE-ACM Computer Conference},
	pages = {133--138},
numpages = {6},
location = {Boston, Massachusetts},
series = {IRE-AIEE-ACM '59 (Eastern)}
}