V.1.0 of MPI

parent a6a737ef9d
commit bea6c82fb4
@@ -144,6 +144,7 @@ HTML_DYNAMIC_SECTIONS = @DOXYGEN_HTML_DYNAMIC_SECTIONS@
HTML_INDEX_NUM_ENTRIES = @DOXYGEN_HTML_INDEX_NUM_ENTRIES@
GENERATE_DOCSET = @DOXYGEN_GENERATE_DOCSET@
DOCSET_FEEDNAME = @DOXYGEN_DOCSET_FEEDNAME@
DOCSET_FEEDURL = @DOXYGEN_DOCSET_FEEDURL@
DOCSET_BUNDLE_ID = @DOXYGEN_DOCSET_BUNDLE_ID@
DOCSET_PUBLISHER_ID = @DOXYGEN_DOCSET_PUBLISHER_ID@
DOCSET_PUBLISHER_NAME = @DOXYGEN_DOCSET_PUBLISHER_NAME@
@@ -170,6 +171,7 @@ FULL_SIDEBAR = @DOXYGEN_FULL_SIDEBAR@
ENUM_VALUES_PER_LINE = @DOXYGEN_ENUM_VALUES_PER_LINE@
TREEVIEW_WIDTH = @DOXYGEN_TREEVIEW_WIDTH@
EXT_LINKS_IN_WINDOW = @DOXYGEN_EXT_LINKS_IN_WINDOW@
OBFUSCATE_EMAILS = @DOXYGEN_OBFUSCATE_EMAILS@
HTML_FORMULA_FORMAT = @DOXYGEN_HTML_FORMULA_FORMAT@
FORMULA_FONTSIZE = @DOXYGEN_FORMULA_FONTSIZE@
FORMULA_TRANSPARENT = @DOXYGEN_FORMULA_TRANSPARENT@
@@ -242,7 +244,6 @@ GENERATE_TAGFILE = @DOXYGEN_GENERATE_TAGFILE@
ALLEXTERNALS = @DOXYGEN_ALLEXTERNALS@
EXTERNAL_GROUPS = @DOXYGEN_EXTERNAL_GROUPS@
EXTERNAL_PAGES = @DOXYGEN_EXTERNAL_PAGES@
CLASS_DIAGRAMS = @DOXYGEN_CLASS_DIAGRAMS@
DIA_PATH = @DOXYGEN_DIA_PATH@
HIDE_UNDOC_RELATIONS = @DOXYGEN_HIDE_UNDOC_RELATIONS@
HAVE_DOT = @DOXYGEN_HAVE_DOT@
@@ -264,6 +265,7 @@ CALL_GRAPH = @DOXYGEN_CALL_GRAPH@
CALLER_GRAPH = @DOXYGEN_CALLER_GRAPH@
GRAPHICAL_HIERARCHY = @DOXYGEN_GRAPHICAL_HIERARCHY@
DIRECTORY_GRAPH = @DOXYGEN_DIRECTORY_GRAPH@
DIR_GRAPH_MAX_DEPTH = @DOXYGEN_DIR_GRAPH_MAX_DEPTH@
DOT_IMAGE_FORMAT = @DOXYGEN_DOT_IMAGE_FORMAT@
INTERACTIVE_SVG = @DOXYGEN_INTERACTIVE_SVG@
DOT_PATH = @DOXYGEN_DOT_PATH@
@@ -450,6 +450,9 @@ endif()
if(NOT DEFINED DOXYGEN_EXT_LINKS_IN_WINDOW)
set(DOXYGEN_EXT_LINKS_IN_WINDOW NO)
endif()
if(NOT DEFINED DOXYGEN_OBFUSCATE_EMAILS)
set(DOXYGEN_OBFUSCATE_EMAILS YES)
endif()
if(NOT DEFINED DOXYGEN_HTML_FORMULA_FORMAT)
set(DOXYGEN_HTML_FORMULA_FORMAT png)
endif()
@@ -594,9 +597,6 @@ endif()
if(NOT DEFINED DOXYGEN_EXTERNAL_PAGES)
set(DOXYGEN_EXTERNAL_PAGES YES)
endif()
if(NOT DEFINED DOXYGEN_CLASS_DIAGRAMS)
set(DOXYGEN_CLASS_DIAGRAMS YES)
endif()
if(NOT DEFINED DOXYGEN_HIDE_UNDOC_RELATIONS)
set(DOXYGEN_HIDE_UNDOC_RELATIONS YES)
endif()
@@ -654,6 +654,9 @@ endif()
if(NOT DEFINED DOXYGEN_DIRECTORY_GRAPH)
set(DOXYGEN_DIRECTORY_GRAPH YES)
endif()
if(NOT DEFINED DOXYGEN_DIR_GRAPH_MAX_DEPTH)
set(DOXYGEN_DIR_GRAPH_MAX_DEPTH 1)
endif()
if(NOT DEFINED DOXYGEN_DOT_IMAGE_FORMAT)
set(DOXYGEN_DOT_IMAGE_FORMAT png)
endif()
BIN build/ag_gen
Binary file not shown.
@@ -1,132 +1,132 @@
[
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/main.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/main.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/main.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/main.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/main.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/nm_scanner.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/nm_scanner.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/nm_scanner.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/nm_scanner.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/build/nm_scanner.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/nm_parser.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/nm_parser.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/nm_parser.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/nm_parser.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/build/nm_parser.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/xp_scanner.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/xp_scanner.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/xp_scanner.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/xp_scanner.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/build/xp_scanner.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/xp_parser.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/xp_parser.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/xp_parser.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/build/xp_parser.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/build/xp_parser.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/ag_gen.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/ag_gen.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/ag_gen.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/ag_gen.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/ag_gen.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/asset.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/asset.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/asset.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/asset.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/asset.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/assetgroup.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/assetgroup.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/assetgroup.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/assetgroup.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/assetgroup.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/edge.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/edge.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/edge.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/edge.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/edge.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/exploit.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/exploit.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/exploit.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/exploit.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/exploit.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/factbase.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/factbase.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/factbase.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/factbase.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/factbase.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/network_state.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/network_state.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/network_state.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/network_state.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/network_state.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/quality.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/quality.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/quality.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/quality.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/quality.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/topology.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/topology.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/ag_gen/topology.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/topology.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/ag_gen/topology.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/avail_mem.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/avail_mem.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/avail_mem.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/avail_mem.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/avail_mem.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/build_sql.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/build_sql.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/build_sql.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/build_sql.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/build_sql.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/common.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/common.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/common.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/common.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/common.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/db_functions.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/db_functions.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/db_functions.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/db_functions.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/db_functions.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/hash.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/hash.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/hash.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/hash.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/hash.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/list.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/list.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/list.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/list.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/list.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/mem.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/mem.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/mem.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/mem.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/mem.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/redis_manager.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/redis_manager.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/util/redis_manager.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/redis_manager.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/redis_manager.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/str_array.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/str_array.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/str_array.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/str_array.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/str_array.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/cc -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/vector.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/vector.c\"",
|
||||
"command": "/usr/bin/cc -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -g -pthread -o CMakeFiles/ag_gen.dir/src/util/vector.c.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/vector.c\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/util/vector.c"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/mpi/serialize.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/mpi/serialize.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/mpi/serialize.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/mpi/serialize.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/mpi/serialize.cpp"
|
||||
},
|
||||
{
|
||||
"directory": "/home/noah/Documents/School/Thesis Work/ag_parallel/build",
|
||||
"command": "/usr/bin/c++ -DBOOST_ALL_NO_LIB -DBOOST_MPI_DYN_LINK -DBOOST_SERIALIZATION_DYN_LINK -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/mpi/tasks.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/mpi/tasks.cpp\"",
|
||||
"command": "/usr/bin/c++ -DBOOST_MPI_DYN_LINK -DBOOST_MPI_NO_LIB -DBOOST_SERIALIZATION_DYN_LINK -DBOOST_SERIALIZATION_NO_LIB -I/usr/include/postgresql -I\"/home/noah/Documents/School/Thesis Work/ag_parallel/src\" -std=c++14 -fopenmp -DREDIS -g -g -O0 -ggdb -Wall -fopenmp -pedantic -pthread -o CMakeFiles/ag_gen.dir/src/mpi/tasks.cpp.o -c \"/home/noah/Documents/School/Thesis Work/ag_parallel/src/mpi/tasks.cpp\"",
|
||||
"file": "/home/noah/Documents/School/Thesis Work/ag_parallel/src/mpi/tasks.cpp"
|
||||
},
|
||||
{
|
||||
|
||||
@@ -5,6 +5,8 @@ NUM_SERV=${1:-1}
TYPE=${2:-sync}
CARS=${3:-2}
DBNAME=${4:-tmp}
NODES=${5:-3}

ITFC="eth0"

strval1="sync"
@@ -54,7 +56,7 @@ if [ "$TYPE" == "$strval1" ]; then
#Curr
#mpirun --mca btl tcp,self --mca btl_tcp_if_include "$ITFC" --mca opal_warn_on_missing_libcuda 0 ./ag_gen -n ../Oct_2021/nm_files/"$CARS"_car_timeline_maintenance.nm -x ../Oct_2021/Sync/4_Exploits/"$NUM_SERV"_Serv/sync_timeline_maintenance.xp -t 1 -q 1 -p -a 0.6 -z "$DBNAME"
#Test
mpirun --mca btl_openib_allow_ib 1 --mca opal_warn_on_missing_libcuda 0 ./ag_gen -n ../Oct_2021/nm_files/"$CARS"_car_timeline_maintenance.nm -x ../Oct_2021/Sync/4_Exploits/"$NUM_SERV"_Serv/sync_timeline_maintenance.xp -t 1 -q 1 -p -a 0.6 -z "$DBNAME"
mpirun --mca btl_openib_allow_ib 1 --mca opal_warn_on_missing_libcuda 0 --mca btl ^uct -np "$NODES" ./ag_gen -n ../Oct_2021/nm_files/"$CARS"_car_timeline_maintenance.nm -x ../Oct_2021/Sync/4_Exploits/"$NUM_SERV"_Serv/sync_timeline_maintenance.xp -t 1 -q 1 -p -a 0.6 -z "$DBNAME"

elif [ "$TYPE" == "$strval2" ]; then
@@ -62,7 +64,7 @@ elif [ "$TYPE" == "$strval2" ]; then
#Test
#mpirun --mca btl tcp,self --mca btl_tcp_if_include "$ITFC" --mca opal_warn_on_missing_libcuda 0 ./ag_gen -n ../Oct_2021/nm_files/"$CARS"_car_timeline_maintenance.nm -x ../Oct_2021/Non_Sync/4_Exploits/"$NUM_SERV"_Serv/generic_timeline_maintenance.xp -t 1 -q 1 -p -a 0.6 -z "$DBNAME"

mpirun --mca btl_openib_allow_ib 1 --mca opal_warn_on_missing_libcuda 0 ./ag_gen -n ../Oct_2021/nm_files/"$CARS"_car_timeline_maintenance.nm -x ../Oct_2021/Sync/4_Exploits/"$NUM_SERV"_Serv/sync_timeline_maintenance.xp -t 1 -q 1 -p -a 0.6 -z "$DBNAME"
mpirun --mca btl_openib_allow_ib 1 --mca opal_warn_on_missing_libcuda 0 -np "$NODES" ./ag_gen -n ../Oct_2021/nm_files/"$CARS"_car_timeline_maintenance.nm -x ../Oct_2021/Non_Sync/4_Exploits/"$NUM_SERV"_Serv/generic_timeline_maintenance.xp -t 1 -q 1 -p -a 0.6 -z "$DBNAME"

else
echo "Running default."
@@ -27,12 +27,16 @@
#include <boost/serialization/list.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

#include <boost/mpi.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/collectives.hpp>

#include <boost/serialization/is_bitwise_serializable.hpp>
#include <boost/range/irange.hpp>

namespace mpi = boost::mpi;
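For readers who have not used Boost.MPI before, the headers added above are all that is needed to stand up an MPI process. A minimal, self-contained sketch (not taken from this commit) looks like the following; it is what the driver script's mpirun -np "$NODES" invocation ultimately launches N copies of.

// Minimal Boost.MPI bring-up sketch (illustrative, not project code).
#include <iostream>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>

namespace mpi = boost::mpi;

int main(int argc, char *argv[]) {
    mpi::environment env(argc, argv);   // wraps MPI_Init / MPI_Finalize
    mpi::communicator world;            // wraps MPI_COMM_WORLD
    std::cout << "rank " << world.rank() << " of " << world.size() << std::endl;
    return 0;
}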
@@ -94,7 +98,7 @@ AGGen::AGGen(AGGenInstance &_instance) : instance(_instance) {
* @param group A tuple containing the exploit and applicable assets
* @return A tuple containing the "real" qualities and "real" topologies
*/
static std::tuple<std::vector<std::tuple<ACTION_T, Quality>>, std::vector<std::tuple<ACTION_T, Topology>>>
std::tuple<std::vector<std::tuple<ACTION_T, Quality>>, std::vector<std::tuple<ACTION_T, Topology>>>
createPostConditions(std::tuple<Exploit, AssetGroup> &group, Keyvalue &facts) {
auto ex = std::get<0>(group);
auto ag = std::get<1>(group);
@@ -328,6 +332,7 @@ AGGenInstance &AGGen::generate(bool batch_process, int batch_size, int numThrd,
printf("The number of threads used is %d\n",numThrd);
printf("The initial QSize is %d\n",initQSize);
*/

int frt_size=frontier.size();
printf("The actual QSize to start using multiple threads is %d\n",frt_size);
@@ -337,24 +342,53 @@ AGGenInstance &AGGen::generate(bool batch_process, int batch_size, int numThrd,
gettimeofday(&t1,NULL);

int num_tasks = 6;
#pragma omp parallel for num_threads(numThrd) default(none) shared(esize,counter,exploit_list,od_map,frt_size,total_t,t1,t2,std::cout, mem_threshold, num_tasks, ex_groups) schedule(dynamic,1)
#pragma omp parallel for num_threads(numThrd) default(none) shared(esize,counter,exploit_list,od_map,frt_size,total_t,t1,t2,std::cout, mem_threshold, num_tasks, ex_groups, world) schedule(dynamic,1)
//auto ag_start = std::chrono::system_clock::now();
for(int k=0;k<frt_size;k++){

int mpi_exit = 0;
double f_alpha = 0.0;
auto tot_sys_mem = getTotalSystemMemory();
std::deque<NetworkState> localFrontier;
localFrontier.emplace_front(frontier[k]);

//TODO: Abort signal from Task 0 when all nodes should be done since they don't keep
//up with frontier
while (!localFrontier.empty() || !unex_empty()){//while starts
if (mpi_exit == 1){
#pragma omp cancel for
}

while (!localFrontier.empty() || !unex_empty() || world.rank() > 0){//while starts

//Node 0 needs to tell other nodes to continue
if(world.rank() == 0){
int dummy = 1;
for (int w = 1; w < world.size(); w++)
world.isend(w, 14, dummy);
}

else {
//If we don't have the go-ahead, check for the Finalize message.
while(!world.iprobe(0, 14)){
if(world.iprobe(0, 15)){
mpi_exit = 1;
break;
}
}
if(mpi_exit == 1)
break;
//Receive the message so it's not just sitting in the queue.
int dummy;
world.irecv(0, 14, dummy);
}

world.barrier();

//We need to refill the localFrontier with states from the database if it's empty
if(localFrontier.empty() && world.rank() == 0) {
task_zero(instance, localFrontier, mem_threshold);
}
//Have all nodes wait until Frontier is refilled?
//world.barrier();
world.barrier();

//Task 1 Node Allocating
int alloc;
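The tag-14/tag-15 handshake above is the heart of the coordination: rank 0 posts a "go-ahead" (tag 14) to every other rank at the top of each iteration, and the workers spin on iprobe until either that go-ahead or the "finalize" tag (15) arrives. A hedged, self-contained sketch of the same pattern (illustration only, not the project's code):

// Go-ahead / finalize handshake, isolated from the surrounding generator loop.
#include <boost/mpi.hpp>
namespace mpi = boost::mpi;

// Returns false once rank 0 has broadcast the finalize tag (15).
bool await_go_ahead(mpi::communicator &world) {
    if (world.rank() == 0) {
        int token = 1;
        for (int w = 1; w < world.size(); ++w)
            world.isend(w, 14, token);   // tell every worker to continue
        return true;
    }
    while (!world.iprobe(0, 14)) {       // wait for the go-ahead...
        if (world.iprobe(0, 15))
            return false;                // ...unless rank 0 says we are done
    }
    int token;
    world.recv(0, 14, token);            // drain the go-ahead message
    return true;
}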
@@ -371,47 +405,421 @@ AGGenInstance &AGGen::generate(bool batch_process, int batch_size, int numThrd,
reduc_factor = 1;
}

//Create Communicators
boost::mpi::communicator tcomm = world.split(world.rank() > 0 && world.rank() <= alloc);

boost::mpi::communicator ttwo_comm = world.split(world.rank() == send_check(world, alloc) && world.rank() <= send_check(world, 2*two_alloc));

//Task 0 to Task 1 Communication
if(world.rank() == 0)
{
auto current_state = localFrontier.back();
auto current_hash = current_state.get_hash(instance.facts);
localFrontier.pop_back();
for(l=0; l <= alloc; l++){
world.isend(send_check(world, world.rank()), 0, current_state);
for(int l=0; l <= alloc; l++){
world.isend(send_check(world, world.rank()+l), 20, current_state);
}
}

//Execute Task 1
if (world.rank() > 0 && world.rank() <= alloc){
NetworkState{current_state};
world.irecv(mpi::any_source, 0, current_state);
task_one(instance, current_state, exploit_list, od_map, alloc, two_alloc, reduc_factor, num_tasks, world);
NetworkState current_state;
world.recv(mpi::any_source, 20, current_state);
task_one(instance, current_state, exploit_list, od_map, alloc, two_alloc, reduc_factor, num_tasks, world, tcomm);
}
world.barrier();

//Execute Task 2
if(world.rank() == 1 + alloc && world.rank() <= two_alloc)
if(world.rank() == send_check(world, alloc) && world.rank() <= send_check(world, 2*two_alloc))
{
task_two(instance);
//Execute Task 2
task_two(instance, alloc, two_alloc, world, localFrontier, mem_threshold,\
ttwo_comm, ex_groups, hash_map);
//Wait for all Task 2 nodes to finish
ttwo_comm.barrier();

//Have the 0th Task 2 node tell the other world nodes that it's done
if(ttwo_comm.rank() == 0){
for (int w = 0; w < world.size(); w++)
{
if(w != world.rank() && w > send_check(world, 2*two_alloc))
{
world.isend(w, 2, 1);
}
}
}
}
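The two world.split() calls above carve the global communicator into per-task groups so that collectives such as barrier() only synchronize the ranks working on that task. A self-contained sketch of the mechanism (the names and the alloc cutoff are illustrative, not taken from the commit):

// Splitting a communicator by "color": ranks with the same color end up together.
#include <boost/mpi.hpp>
namespace mpi = boost::mpi;

void split_example(mpi::communicator &world, int alloc) {
    // Ranks 1..alloc form the Task 1 group; every other rank gets the other color.
    int color = (world.rank() > 0 && world.rank() <= alloc) ? 1 : 0;
    mpi::communicator tcomm = world.split(color);

    if (color == 1) {
        // Inside the sub-communicator ranks are renumbered 0..tcomm.size()-1,
        // and this barrier only waits on the Task 1 ranks.
        tcomm.barrier();
    }
}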
|
||||
//recv
//world.isend(world.rank()+1+s, 0, appl_exploits);
else{
/*
One, need to listen for Updates to instance.facts
MPI TAGS:
Tag 2 = Task 2 is done
Tag 3 = New fact
Tag 4 = Hash New State
Tag 5 = Critical New State
*/
//If we haven't been told that task 2 is finished, and if we still have more facts or states to update:
//while(!world.iprobe(1+alloc, 2) && world.iprobe(mpi::any_source, 3) && world.iprobe(mpi::any_source, 4) && world.iprobe(mpi::any_source, 5))

while(!world.iprobe(send_check(world, alloc), 2) || world.iprobe(mpi::any_source, 3) || world.iprobe(mpi::any_source, 4) || world.iprobe(mpi::any_source, 5))
{

//If we get a new fact and new state, update
if(world.iprobe(mpi::any_source, 3) && world.iprobe(mpi::any_source, 4)){
NetworkState new_state;
Quality fact;

world.irecv(mpi::any_source, 3, fact);
world.irecv(mpi::any_source, 4, new_state);

instance.facts.hash_table[new_state.compound_assign(fact)]=instance.facts.size();
instance.facts.length++;
instance.facts.str_vector.push_back(new_state.compound_assign(fact));
}
if(world.rank() == 0){
if(world.iprobe(mpi::any_source, 5)){
NetworkState critical_state;
NetworkState current_state;
Exploit exploit;
AssetGroup assetGroup;
world.irecv(mpi::any_source, 5, critical_state);
world.irecv(mpi::any_source, 6, current_state);
world.irecv(mpi::any_source, 10, exploit);
world.irecv(mpi::any_source, 11, assetGroup);

task_three(instance, critical_state, localFrontier, mem_threshold, world,\
two_alloc, current_state, exploit, assetGroup, hash_map);
}
}

}
//Receive the message so it doesn't just sit there
int ttwo_done;
world.recv(mpi::any_source, 2, ttwo_done);
}
world.barrier();
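The listener above, together with Task 4 below, relies on a growing set of integer message tags that are documented only in the comment block. A hedged suggestion (not part of the commit) is to name them once, which keeps senders and receivers from drifting apart:

// Illustrative naming of the MPI tags used in this file.
enum MpiTag : int {
    TAG_TASK_TWO_DONE  = 2,   // Task 2 has finished
    TAG_NEW_FACT       = 3,   // new Quality fact discovered
    TAG_NEW_STATE_HASH = 4,   // hashed new NetworkState
    TAG_CRITICAL_STATE = 5,   // new state rank 0 must process
    TAG_CURRENT_STATE  = 6,   // parent state of the critical state
    TAG_FACTBASE_DUMP  = 7,   // vector<Factbase> for the DB writer
    TAG_EDGE_DUMP      = 8,   // vector<Edge> for the DB writer
    TAG_EXPLOIT        = 10,  // Exploit that produced the new state
    TAG_ASSET_GROUP    = 11,  // AssetGroup that produced the new state
    TAG_GO_AHEAD       = 14,  // rank 0: continue the outer loop
    TAG_FINALIZE       = 15,  // rank 0: shut everything down
    TAG_TASK_ONE_STATE = 20   // state handed from rank 0 to a Task 1 rank
};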
|
||||
//Task Four
if (world.rank() == send_check(world, 2*two_alloc+1)){
if(world.iprobe(0, 7) || world.iprobe(0, 8)){
std::vector<Factbase> factbases_dump;
std::vector<Edge> edges_dump;
world.irecv(0, 7, factbases_dump);
world.irecv(0, 8, edges_dump);
instance.factbases = factbases_dump;
instance.edges = edges_dump;
//task_four(instance);
save_ag_to_db(instance, true);
}
}
} //while ends

if(world.rank() == 0){
for (int w = 1; w < world.size(); w++)
world.isend(w, 15, 1);
}

if (mpi_exit == 1){
#pragma omp cancel for
}

auto ag_end= std::chrono::system_clock::now();

}//OpenMP block ends
std::cout << "Process " << world.rank() << " is finishing." << std::endl;
//Wait for db ops to finish
world.barrier();
if(world.rank() == 0){
gettimeofday(&t2,NULL);
total_t+=(t2.tv_sec-t1.tv_sec)*1000.0+(t2.tv_usec-t1.tv_usec)/1000.0;
std::cout << "Graph generation took " << total_t << " ms for process " << world.rank() << std::endl;
printf("AG TOOK %lf ms.\n", total_t);

auto end = std::chrono::system_clock::now();

std::chrono::duration<double> elapsed_seconds = end - start;
instance.elapsed_seconds = elapsed_seconds;
}

//std::vector<std::tuple<Exploit, AssetGroup>> appl_exploits;
return instance;
}
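Task 4 above receives entire std::vector<Factbase> and std::vector<Edge> payloads in one message. Boost.MPI can do this for any serializable type once the relevant boost/serialization headers are included, which is what the include changes elsewhere in this commit enable. A self-contained sketch of the idea, using std::vector<std::string> as a stand-in payload:

// Shipping a whole container through Boost.MPI (illustrative, not project code).
#include <string>
#include <vector>
#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

namespace mpi = boost::mpi;

int main(int argc, char *argv[]) {
    mpi::environment env(argc, argv);
    mpi::communicator world;

    const int TAG_DUMP = 7;                    // tag value borrowed from the diff
    if (world.rank() == 0 && world.size() > 1) {
        std::vector<std::string> dump{"factbase_1", "factbase_2"};
        world.send(1, TAG_DUMP, dump);         // serialized under the hood
    } else if (world.rank() == 1) {
        std::vector<std::string> dump;
        world.recv(0, TAG_DUMP, dump);
    }
    return 0;
}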
|
||||
//if (world.size() > num_tasks){
// if (world.rank() >= two_alloc + 1 && world.rank() <= (2 * two_alloc + reduc_factor)){
//recv the appl exploits
// }
//}
AGGenInstance &AGGen::single_generate(bool batch_process, int batch_num, int numThrd,\
int initQSize, double mem_threshold, boost::mpi::communicator &world){

std::vector<Exploit> exploit_list = instance.exploits;

//Create a vector that contains all the groups of exploits to be fired synchronously
std::vector<std::string> ex_groups;
for (const auto &ex : exploit_list) {
//If the group isn't already in the vector
if(!(std::find(ex_groups.begin(), ex_groups.end(), ex.get_group()) !=ex_groups.end())) {
//Don't include the "null" group
if(ex.get_group()!="null")
ex_groups.emplace_back(ex.get_group());
}

}

//Print out the groups if desired
std::cout <<"\nThere are "<<ex_groups.size()<<" groups: ";
for(int i=0; i<ex_groups.size(); i++){
std::cout<<ex_groups[i] << ". ";
}
std::cout<<"\n";

auto start = std::chrono::system_clock::now();

unsigned long esize = exploit_list.size();
printf("esize: %lu\n", esize);
bool save_queued = false;

std::cout << "Generating Attack Graph" << std::endl;

std::unordered_map<size_t, PermSet<size_t>> od_map;
size_t assets_size = instance.assets.size();
for (const auto &ex : exploit_list) {
size_t num_params = ex.get_num_params();
if (od_map.find(num_params) == od_map.end()) {
Odometer<size_t> od(num_params, assets_size);
od_map[num_params] = od.get_all();
}
}
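The de-duplication loop above does a linear std::find over ex_groups for every exploit. A hedged aside (not part of the commit): collecting the names in a std::set gives the same result with the duplicate check built in. Illustrative sketch with stand-in group names:

// Alternative grouping sketch; the string literals stand in for Exploit::get_group().
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> groups_seen{"maintenance", "null", "maintenance", "timeline"};
    std::set<std::string> ex_groups;
    for (const auto &g : groups_seen) {
        if (g != "null")          // skip the placeholder "null" group, as the diff does
            ex_groups.insert(g);  // std::set silently ignores duplicates
    }
    std::cout << "There are " << ex_groups.size() << " groups\n";
    return 0;
}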
/*
|
||||
//might be where to apply parallelization.
|
||||
while (frontier.size()<initQSize){//while starts, test multiple thread case THIS WAS THE ONE MING USED
|
||||
//while (frontier.size()!=0){//while starts, test single thread case
|
||||
// while(!frontier.empty()) {
|
||||
auto current_state = frontier.back();
|
||||
auto current_hash = current_state.get_hash(instance.facts);
|
||||
frontier.pop_back();
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> appl_exploits;
|
||||
for (size_t i = 0; i < esize; i++) {//for loop for applicable exploits starts
|
||||
auto e = exploit_list.at(i);
|
||||
size_t num_params = e.get_num_params();
|
||||
auto preconds_q = e.precond_list_q();
|
||||
auto preconds_t = e.precond_list_t();
|
||||
auto perms = od_map[num_params];
|
||||
std::vector<AssetGroup> asset_groups;
|
||||
for (auto perm : perms) {
|
||||
std::vector<Quality> asset_group_quals;
|
||||
std::vector<Topology> asset_group_topos;
|
||||
asset_group_quals.reserve(preconds_q.size());
|
||||
asset_group_topos.reserve(preconds_t.size());
|
||||
for (auto &precond : preconds_q) {
|
||||
asset_group_quals.emplace_back(
|
||||
perm[precond.get_param_num()], precond.name, precond.op,
|
||||
precond.value, instance.facts);
|
||||
}
|
||||
for (auto &precond : preconds_t) {
|
||||
auto dir = precond.get_dir();
|
||||
auto prop = precond.get_property();
|
||||
auto op = precond.get_operation();
|
||||
auto val = precond.get_value();
|
||||
|
||||
asset_group_topos.emplace_back(
|
||||
perm[precond.get_from_param()],
|
||||
perm[precond.get_to_param()], dir, prop, op, val, instance.facts);
|
||||
}
|
||||
|
||||
asset_groups.emplace_back(asset_group_quals, asset_group_topos,
|
||||
perm);
|
||||
}
|
||||
auto assetgroup_size = asset_groups.size();
|
||||
for (size_t j = 0; j < assetgroup_size; j++) {
|
||||
auto asset_group = asset_groups.at(j);
|
||||
for (auto &quality : asset_group.get_hypo_quals()) {
|
||||
if (!current_state.get_factbase().find_quality(quality)) {
|
||||
goto LOOPCONTINUE;
|
||||
}
|
||||
}
|
||||
for (auto &topology : asset_group.get_hypo_topos()) {
|
||||
if (!current_state.get_factbase().find_topology(topology)) {
|
||||
goto LOOPCONTINUE;
|
||||
}
|
||||
}
|
||||
//MING DID NOT HAVE THIS CRITICAL, BUT KYLE DID
|
||||
// #pragma omp critical
|
||||
{
|
||||
auto new_appl_exploit = std::make_tuple(e, asset_group);
|
||||
appl_exploits.push_back(new_appl_exploit);
|
||||
}
|
||||
LOOPCONTINUE:;
|
||||
}
|
||||
} //for loop for applicable exploits ends
|
||||
|
||||
auto appl_expl_size = appl_exploits.size();
|
||||
for (size_t j = 0; j < appl_expl_size; j++) { //for loop for new states starts
|
||||
auto e = appl_exploits.at(j);
|
||||
auto exploit = std::get<0>(e);
|
||||
auto assetGroup = std::get<1>(e);
|
||||
auto postconditions = createPostConditions(e, instance.facts);
|
||||
auto qualities = std::get<0>(postconditions);
|
||||
auto topologies = std::get<1>(postconditions);
|
||||
NetworkState new_state{current_state};
|
||||
for(auto &qual : qualities) {
|
||||
auto action = std::get<0>(qual);
|
||||
auto fact = std::get<1>(qual);
|
||||
switch(action) {
|
||||
case ADD_T:
|
||||
new_state.add_quality(fact);
|
||||
break;
|
||||
case UPDATE_T:
|
||||
new_state.update_quality(fact);
|
||||
break;
|
||||
case DELETE_T:
|
||||
new_state.delete_quality(fact);
|
||||
break;
|
||||
}
|
||||
}
|
||||
for(auto &topo : topologies) {
|
||||
auto action = std::get<0>(topo);
|
||||
auto fact = std::get<1>(topo);
|
||||
switch(action) {
|
||||
case ADD_T:
|
||||
new_state.add_topology(fact);
|
||||
break;
|
||||
case UPDATE_T:
|
||||
new_state.update_topology(fact);
|
||||
break;
|
||||
case DELETE_T:
|
||||
new_state.delete_topology(fact);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
auto hash_num = new_state.get_hash(instance.facts);
|
||||
if (hash_num == current_hash)
|
||||
continue;
|
||||
if (hash_map.find(hash_num) == hash_map.end()) {
|
||||
new_state.set_id();
|
||||
auto facts_tuple = new_state.get_factbase().get_facts_tuple();
|
||||
FactbaseItems new_items =
|
||||
std::make_tuple(facts_tuple, new_state.get_id());
|
||||
instance.factbase_items.push_back(new_items);
|
||||
instance.factbases.push_back(new_state.get_factbase());
|
||||
hash_map.insert(std::make_pair(new_state.get_hash(instance.facts), new_state.get_id()));
|
||||
frontier.emplace_front(new_state);
|
||||
Edge ed(current_state.get_id(), new_state.get_id(), exploit, assetGroup);
|
||||
ed.set_id();
|
||||
instance.edges.push_back(ed);
|
||||
counter++;
|
||||
}
|
||||
else {
|
||||
int id = hash_map[hash_num];
|
||||
Edge ed(current_state.get_id(), id, exploit, assetGroup);
|
||||
ed.set_id();
|
||||
instance.edges.push_back(ed);
|
||||
}
|
||||
} //for loop for new states ends
|
||||
} //while ends
|
||||
|
||||
//int numThrd=32;
|
||||
printf("The number of threads used is %d\n",numThrd);
|
||||
printf("The initial QSize is %d\n",initQSize);
|
||||
*/
|
||||
int frt_size=frontier.size();
|
||||
printf("The actual QSize to start using multiple threads is %d\n",frt_size);
|
||||
|
||||
|
||||
double total_t=0.0;
|
||||
struct timeval t1,t2;
|
||||
gettimeofday(&t1,NULL);
|
||||
//#pragma omp parallel for num_threads(numThrd) default(none) shared(esize,counter,exploit_list,od_map,frt_size,total_t,t1,t2) schedule(dynamic,1)
|
||||
#pragma omp parallel for num_threads(numThrd) default(none) shared(esize,exploit_list,od_map,frt_size,total_t,t1,t2,std::cout, mem_threshold, ex_groups) schedule(dynamic,1)
|
||||
//auto ag_start = std::chrono::system_clock::now();
|
||||
for(int k=0;k<frt_size;k++){
|
||||
|
||||
double f_alpha = 0.0;
|
||||
auto tot_sys_mem = getTotalSystemMemory();
|
||||
std::deque<NetworkState> localFrontier;
|
||||
localFrontier.emplace_front(frontier[k]);
|
||||
while (!localFrontier.empty() || !unex_empty()){//while starts
|
||||
//We need to refill the localFrontier with states from the database if it's empty
|
||||
if(localFrontier.empty()) {
|
||||
std::cout << "Frontier empty, retrieving from database" << std::endl;
|
||||
double total_tt = 0.0;
|
||||
struct timeval tt1,tt2;
|
||||
gettimeofday(&tt1,NULL);
|
||||
int retrv_counter = 0;
|
||||
|
||||
//TODO: One (or a few) larger queries to pull in new states, rather than single queries that pull states one-by-one
|
||||
do {
|
||||
NetworkState db_new_state = fetch_unexplored(instance.facts);
|
||||
localFrontier.emplace_front(db_new_state);
|
||||
//alpha = get_alpha();
|
||||
f_alpha = (static_cast<double>(localFrontier.size()) * (localFrontier.back().get_size()))/tot_sys_mem;
|
||||
retrv_counter += 1;
|
||||
}
|
||||
//Leave a 30% buffer in alpha
|
||||
while((f_alpha <= (mem_threshold * 0.7)) && !unex_empty());
|
||||
|
||||
std::cout << "Retrieved " << retrv_counter << " factbases from the database." << std::endl;
|
||||
gettimeofday(&tt2,NULL);
|
||||
total_tt+=(tt2.tv_sec-tt1.tv_sec)*1000.0+(tt2.tv_usec-tt1.tv_usec)/1000.0;
|
||||
//printf("Retrieving from db took %lf s.\n", total_tt);
|
||||
}
|
||||
//std::cout<<"FRONTIER SIZE: "<<localFrontier.size()<<std::endl;
|
||||
auto current_state = localFrontier.back();
|
||||
auto current_hash = current_state.get_hash(instance.facts);
|
||||
localFrontier.pop_back();
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> appl_exploits;
|
||||
for (size_t i = 0; i < esize; i++) {//for loop for applicable exploits starts
|
||||
auto e = exploit_list.at(i);
|
||||
size_t num_params = e.get_num_params();
|
||||
auto preconds_q = e.precond_list_q();
|
||||
auto preconds_t = e.precond_list_t();
|
||||
auto perms = od_map[num_params];
|
||||
std::vector<AssetGroup> asset_groups;
|
||||
for (auto perm : perms) {
|
||||
std::vector<Quality> asset_group_quals;
|
||||
std::vector<Topology> asset_group_topos;
|
||||
asset_group_quals.reserve(preconds_q.size());
|
||||
asset_group_topos.reserve(preconds_t.size());
|
||||
|
||||
|
||||
//std::vector<int>::size_type sz;
|
||||
//sz=asset_group_quals.capacity();
|
||||
for (auto &precond : preconds_q) {
|
||||
|
||||
//Old quality encode caused this to crash
|
||||
asset_group_quals.emplace_back(
|
||||
perm[precond.get_param_num()], precond.name, precond.op,
|
||||
precond.value, instance.facts);
|
||||
}
|
||||
for (auto &precond : preconds_t) {
|
||||
auto dir = precond.get_dir();
|
||||
auto prop = precond.get_property();
|
||||
auto op = precond.get_operation();
|
||||
auto val = precond.get_value();
|
||||
asset_group_topos.emplace_back(
|
||||
perm[precond.get_from_param()],
|
||||
perm[precond.get_to_param()], dir, prop, op, val, instance.facts);
|
||||
}
|
||||
asset_groups.emplace_back(asset_group_quals, asset_group_topos,
|
||||
perm);
|
||||
}
|
||||
auto assetgroup_size = asset_groups.size();
|
||||
for (size_t j = 0; j < assetgroup_size; j++) {
|
||||
auto asset_group = asset_groups.at(j);
|
||||
for (auto &quality : asset_group.get_hypo_quals()) {
|
||||
if (!current_state.get_factbase().find_quality(quality)) {
|
||||
goto LOOPCONTINUE1;
|
||||
}
|
||||
}
|
||||
for (auto &topology : asset_group.get_hypo_topos()) {
|
||||
if (!current_state.get_factbase().find_topology(topology)) {
|
||||
goto LOOPCONTINUE1;
|
||||
}
|
||||
}
|
||||
{
|
||||
auto new_appl_exploit = std::make_tuple(e, asset_group);
|
||||
appl_exploits.push_back(new_appl_exploit);
|
||||
}
|
||||
LOOPCONTINUE1:;
|
||||
}
|
||||
} //for loop for creating applicable exploits ends
|
||||
|
||||
//task_two();
|
||||
|
||||
std::map<std::string, int> group_fired; //Map to hold fired status per group
|
||||
std::map<std::string, std::vector<std::tuple<Exploit, AssetGroup>>> sync_vectors; //Map to hold all group exploits
|
||||
|
||||
@@ -430,7 +838,7 @@ AGGenInstance &AGGen::generate(bool batch_process, int batch_size, int numThrd,
sync_vectors[egroup].push_back(e);
}
}

//loop through the vector
for(auto itr=appl_exploits.begin(); itr!=appl_exploits.end(); itr++){
|
||||
@@ -539,6 +947,7 @@ AGGenInstance &AGGen::generate(bool batch_process, int batch_size, int numThrd,
double i_usage = instance.factbases.back().get_size() * instance.factbases.size() * 2 + sizeof(instance.edges[0]) * instance.edges.size();

i_alpha = i_usage/tot_sys_mem;
double f_alpha;
if (!localFrontier.empty())
f_alpha = (static_cast<double>(localFrontier.size()) * (localFrontier.back().get_size()))/tot_sys_mem;
else
@@ -596,8 +1005,8 @@ AGGenInstance &AGGen::generate(bool batch_process, int batch_size, int numThrd,
else
break;
} //for loop for new states ends
} //while ends
auto ag_end= std::chrono::system_clock::now();
} //while frontier ends
auto ag_end= std::chrono::system_clock::now();
}//OpenMP block ends
gettimeofday(&t2,NULL);
total_t+=(t2.tv_sec-t1.tv_sec)*1000.0+(t2.tv_usec-t1.tv_usec)/1000.0;
|
||||
@@ -30,6 +30,8 @@
#include <boost/serialization/list.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

#include <boost/mpi.hpp>
#include <boost/mpi/environment.hpp>
@@ -88,6 +90,12 @@ class AGGen {

AGGenInstance &generate(bool batch_process, int batch_num, int numThrd,\
int initQSize, double mem_threshold, boost::mpi::communicator &world);

AGGenInstance &single_generate(bool batch_process, int batch_num, int numThrd,\
int initQSize, double mem_threshold, boost::mpi::communicator &world);
};

std::tuple<std::vector<std::tuple<ACTION_T, Quality>>, std::vector<std::tuple<ACTION_T, Topology>>>
createPostConditions(std::tuple<Exploit, AssetGroup> &group, Keyvalue &facts);

#endif // AG_GEN_HPP
|
||||
@@ -4,6 +4,8 @@
#include <iostream>
#include <memory>
#include <vector>
#include <boost/serialization/vector.hpp>

#include "quality.h"
|
||||
|
||||
@@ -19,6 +19,8 @@
#include <boost/serialization/list.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

#include <boost/serialization/is_bitwise_serializable.hpp>

@@ -33,6 +35,9 @@ class AssetGroup {
std::vector<Quality> hypothetical_qualities;
std::vector<Topology> hypothetical_topologies;

friend std::ostream & operator << (std::ostream &os, const AssetGroup &agr);
friend class boost::serialization::access;

std::vector<size_t> perm;

template<class Archive>
|
||||
@@ -20,6 +20,10 @@
Edge::Edge(int iFrom, int iTo, Exploit &ex, AssetGroup &ag)
: from_node(iFrom), to_node(iTo), exploit(ex), assetGroup(ag), deleted(false) {}

Edge::Edge()
{

}
/**
* @return The Edge ID
*/
|
||||
@@ -8,6 +8,18 @@
#include "assetgroup.h"
#include "exploit.h"

#include <boost/archive/tmpdir.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>

#include <boost/serialization/base_object.hpp>
#include <boost/serialization/utility.hpp>
#include <boost/serialization/list.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

/** Edge class
* @brief Edge of the graph.
*/
@@ -20,8 +32,17 @@ class Edge {
AssetGroup assetGroup;
bool deleted;

friend std::ostream & operator << (std::ostream &os, const Edge &ed);
friend class boost::serialization::access;

template<class Archive>
void serialize(Archive &ar, const unsigned int version){
ar & edge_current_id & id & from_node & to_node & exploit & assetGroup & deleted;
}

public:
Edge(int, int, Exploit &, AssetGroup &);
Edge();

std::string get_query();
std::string get_asset_query();
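The serialize() member and friend declaration added to Edge above follow the standard Boost.Serialization recipe, which is also what lets Boost.MPI send Edge objects between ranks. A self-contained round-trip sketch of the same pattern on a stand-in class (not the project's Edge):

// Text-archive round trip for a type with a member serialize().
#include <sstream>
#include <string>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/string.hpp>

class Link {
    friend class boost::serialization::access;
    int from_node = 0;
    int to_node = 0;
    std::string label;

    template <class Archive>
    void serialize(Archive &ar, const unsigned int /* version */) {
        ar & from_node & to_node & label;   // same chained & style as the diff
    }

public:
    Link() = default;
    Link(int f, int t, std::string l) : from_node(f), to_node(t), label(std::move(l)) {}
};

int main() {
    std::stringstream ss;
    const Link original(1, 2, "exploit_fired");
    {
        boost::archive::text_oarchive oa(ss);
        oa << original;                      // write
    }
    Link restored;
    boost::archive::text_iarchive ia(ss);
    ia >> restored;                          // read back
    return 0;
}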
|
||||
@@ -19,6 +19,7 @@
#include <boost/serialization/list.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>

#include <boost/serialization/is_bitwise_serializable.hpp>

@@ -35,6 +36,9 @@ typedef std::tuple<ACTION_T, ParameterizedTopology> PostconditionT;
* - postconditions
*/
class Exploit {
friend std::ostream & operator << (std::ostream &os, const Exploit &ex);
friend class boost::serialization::access;

int id;
std::string name;
size_t num_params;
@@ -48,7 +52,7 @@ class Exploit {

template<class Archive>
void serialize(Archive &ar, const unsigned int /* file_version */){
ar & id & num_params & group & preconds_q & preconds_t & postconds_q & postconds_t;
ar & id & name & num_params & group & preconds_q & preconds_t & postconds_q & postconds_t;
}
public:
Exploit(int preId, std::string &preName, int preNumParams,
|
||||
@ -171,7 +171,6 @@ size_t Factbase::hash(Keyvalue &factlist) const {
|
||||
std::set<size_t> factset_q;
|
||||
std::transform(qualities.begin(), qualities.end(), std::inserter(factset_q, factset_q.end()),
|
||||
[&](const Quality &q) -> size_t {
|
||||
//THIS WAS THE ORIGINAL ONE THAT WAS ERRORING:
|
||||
return q.encode(factlist).enc;});
|
||||
|
||||
std::for_each(factset_q.begin(), factset_q.end(),
|
||||
@@ -191,7 +190,7 @@ size_t Factbase::hash(Keyvalue &factlist) const {
|
||||
*/
|
||||
void Factbase::print() const {
|
||||
cout << "ID: " << id << endl;
|
||||
// cout << "HASH: " << hash() << endl;
|
||||
//cout << "HASH: " << hash() << endl;
|
||||
cout << "Qualities: " << qualities.size() << endl;
|
||||
cout << "Topologies: " << topologies.size() << endl << endl;
|
||||
for (auto &qual : qualities) {
|
||||
|
||||
@@ -46,7 +46,7 @@ class Factbase {
|
||||
|
||||
template<class Archive>
|
||||
void serialize(Archive &ar, const unsigned int /* file_version */){
|
||||
ar & qualities & topologies;
|
||||
ar & qualities & topologies & id;
|
||||
//ar & current_id & id & qsize & tsize & qualities & topologies;
|
||||
//ar & qualities;
|
||||
//ar & topologies;
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
#include <boost/serialization/list.hpp>
|
||||
#include <boost/serialization/assume_abstract.hpp>
|
||||
#include <boost/serialization/string.hpp>
|
||||
#include <boost/serialization/vector.hpp>
|
||||
|
||||
|
||||
#include <boost/serialization/is_bitwise_serializable.hpp>
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <string> //NOAH
|
||||
#include "../util/db_functions.h" //NOAH
|
||||
#include <string>
|
||||
#include "../util/db_functions.h"
|
||||
#include "ag_gen.h"
|
||||
#include "unistd.h" //Included for sleep function for debugging purposes only
|
||||
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
#include <boost/serialization/list.hpp>
|
||||
#include <boost/serialization/assume_abstract.hpp>
|
||||
#include <boost/serialization/string.hpp>
|
||||
#include <boost/serialization/vector.hpp>
|
||||
|
||||
|
||||
#include <boost/serialization/is_bitwise_serializable.hpp>
|
||||
|
||||
@@ -54,8 +56,21 @@ struct ParameterizedQuality {
|
||||
std::cout << "Operation: " << op << std::endl;
|
||||
std::cout << "Value: " + value << std::endl << std::endl;
|
||||
}
|
||||
|
||||
friend std::ostream & operator << (std::ostream &os, const ParameterizedQuality &ql);
|
||||
friend class boost::serialization::access;
|
||||
|
||||
template<class Archive>
|
||||
void serialize(Archive &ar, const unsigned int /* file_version */){
|
||||
ar & param & name & value & op;
|
||||
}
|
||||
|
||||
ParameterizedQuality(){}
|
||||
ParameterizedQuality(int _param, std::string _name, std::string _value, std::string _op) :
|
||||
param(_param), name(_name), value(_value), op(_op)
|
||||
{}
|
||||
};
|
||||
BOOST_IS_BITWISE_SERIALIZABLE(ParameterizedQuality)
|
||||
//BOOST_IS_BITWISE_SERIALIZABLE(ParameterizedQuality)
|
||||
|
||||
using PostconditionQuality = std::tuple<ParameterizedQuality, std::string>;
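Editor's note: leaving BOOST_IS_BITWISE_SERIALIZABLE(ParameterizedQuality) commented out is the safe choice here, since the struct carries std::string members and a bitwise (memcpy-style) transfer would ship their internal pointers rather than the character data. The trait is only appropriate for plain-old-data layouts; a hypothetical example where it would be valid:

#include <boost/serialization/is_bitwise_serializable.hpp>

// Hypothetical POD-only type: no pointers, no dynamically allocated members,
// so copying its raw bytes between ranks is a faithful serialization.
struct GridCell {
    int row;
    int col;
    double weight;
};
BOOST_IS_BITWISE_SERIALIZABLE(GridCell)

// ParameterizedQuality instead keeps the ordinary serialize() member above,
// which walks each field (including the strings) through the archive.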
|
||||
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
#include <boost/serialization/list.hpp>
|
||||
#include <boost/serialization/assume_abstract.hpp>
|
||||
#include <boost/serialization/string.hpp>
|
||||
#include <boost/serialization/vector.hpp>
|
||||
|
||||
|
||||
#include <boost/serialization/is_bitwise_serializable.hpp>
|
||||
|
||||
@@ -64,9 +66,23 @@ struct ParameterizedTopology {
|
||||
std::cout << "Operation: " << op << std::endl << std::endl;
|
||||
std::cout << "Value: " << val << std::endl << std::endl;
|
||||
}
|
||||
|
||||
friend std::ostream & operator << (std::ostream &os, const ParameterizedTopology &tl);
|
||||
friend class boost::serialization::access;
|
||||
|
||||
template<class Archive>
|
||||
void serialize(Archive &ar, const unsigned int /* file_version */){
|
||||
ar & from_param & to_param & dir & prop & op & val;
|
||||
}
|
||||
|
||||
ParameterizedTopology(){}
|
||||
ParameterizedTopology(int _from_param, int _to_param, DIRECTION_T _dir, std::string _prop, std::string _op, std::string _val) :
|
||||
from_param(_from_param), to_param(_to_param), dir(_dir), prop(_prop), op(_op), val(_val)
|
||||
{}
|
||||
|
||||
};
|
||||
|
||||
BOOST_IS_BITWISE_SERIALIZABLE(ParameterizedTopology)
|
||||
//BOOST_IS_BITWISE_SERIALIZABLE(ParameterizedTopology)
|
||||
|
||||
|
||||
using PostconditionTopology = std::tuple<ParameterizedTopology, std::string>;
|
||||
|
||||
29
src/main.cpp
@@ -44,7 +44,7 @@
|
||||
#endif // REDIS
|
||||
|
||||
namespace mpi = boost::mpi;
|
||||
|
||||
namespace mt = mpi::threading;
|
||||
|
||||
template<typename GraphEdge>
|
||||
class ag_visitor : public boost::default_dfs_visitor {
|
||||
@@ -517,8 +517,22 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
std::cout << "Arguments parsed." << std::endl;
|
||||
|
||||
mpi::environment env;
|
||||
mt::level mt_level = mt::multiple;
|
||||
boost::mpi::environment env(argc, argv, mt_level);
|
||||
mt::level provided = env.thread_level();
|
||||
|
||||
std::cout << "Asked the MPI environment to be created with threading level: "\
|
||||
<< mt_level << std::endl;
|
||||
std::cout << "MPI Environment was created with threading level: " << provided \
|
||||
<< std::endl;
|
||||
|
||||
// std::cout << "Ensure that the MPI package has the MPI_THREAD_MULTIPLE build-time option enabled,"\
|
||||
<< "or change the environment creation to be use MPI threading level of single." << std::endl;
|
||||
// exit(EXIT_FAILURE);
|
||||
|
||||
|
||||
mpi::communicator world;
|
||||
|
||||
char hammer_host[256];
|
||||
gethostname(hammer_host, 256);
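Editor's note: the block above requests mt::multiple and then prints whatever threading level the MPI library actually granted. A hedged sketch of making that check fail fast when MPI_THREAD_MULTIPLE is unavailable; the early exit is an editorial suggestion, not what main.cpp currently does.

#include <cstdlib>
#include <iostream>
#include <boost/mpi/environment.hpp>

namespace mpi = boost::mpi;
namespace mt  = boost::mpi::threading;

int main(int argc, char *argv[]) {
    mpi::environment env(argc, argv, mt::multiple);   // ask for MPI_THREAD_MULTIPLE
    if (env.thread_level() < mt::multiple) {          // level the library actually provided
        std::cerr << "MPI_THREAD_MULTIPLE not available; rebuild the MPI package with "
                     "thread support or request mt::single instead." << std::endl;
        return EXIT_FAILURE;
    }
    return 0;
}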
|
||||
|
||||
@@ -659,14 +673,17 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
std::cout << "Generating Attack Graph: " << std::flush;
|
||||
AGGen gen(_instance);//use AGGen class to instantiate an obj with the name gen! _instance obj as the parameter! constructor defined in ag_gen.cpp
|
||||
if(world.rank() == 0)
|
||||
postinstance = gen.generate(batch_process, batch_size, thread_count, init_qsize, alpha, world); //The method call to generate the attack graph, defined in ag_gen.cpp.
|
||||
|
||||
if (world.size() > 1)
|
||||
postinstance = gen.generate(batch_process, batch_size, thread_count, init_qsize, alpha, world); //The method call to generate the attack graph, defined in ag_gen.cpp.
|
||||
else
|
||||
postinstance = gen.single_generate(batch_process, batch_size, thread_count, init_qsize, alpha, world); //The method call to generate the attack graph, defined in ag_gen.cpp.
|
||||
|
||||
world.barrier();
|
||||
//Serialization Unit Testing on Postinstance Data
|
||||
//serialization_unit_testing(postinstance, world);
|
||||
//world.barrier();
|
||||
std::cout << "Done\n";
|
||||
world.barrier();
|
||||
std::cout << "Finished generation." << std::endl;
|
||||
//std::cout << "# of edges " <<postinstance.edges.size()<<std::endl;
|
||||
//std::cout << "# of edge_asset_binding" <<postinstance.edges.size()<<std::endl;
|
||||
//std::cout << "# of factbase " <<postinstance.factbases.size()<<std::endl;
|
||||
|
||||
@@ -64,6 +64,175 @@ int quality_check(Quality &q1, Quality &q2){
|
||||
else return 0;
|
||||
}
|
||||
|
||||
int param_quality_check(ParameterizedQuality &pq1, ParameterizedQuality &pq2){
|
||||
if (pq1.param == pq2.param &&
|
||||
pq1.name == pq2.name &&
|
||||
pq1.value == pq2.value &&
|
||||
pq1.op == pq2.op)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
else return 0;
|
||||
}
|
||||
|
||||
int param_topology_check(ParameterizedTopology &pt1, ParameterizedTopology &pt2){
|
||||
if (pt1.get_from_param() == pt2.get_from_param() &&
|
||||
pt1.get_to_param() == pt2.get_to_param() &&
|
||||
pt1.get_dir() == pt2.get_dir() &&
|
||||
pt1.get_property() == pt2.get_property() &&
|
||||
pt1.get_operation() == pt2.get_operation() &&
|
||||
pt1.get_value() == pt2.get_value())
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
else return 0;
|
||||
}
|
||||
|
||||
int postcond_quality_check(PostconditionQ &pq1, PostconditionQ &pq2){
|
||||
|
||||
auto preq1 = std::get<1>(pq1);
|
||||
auto preq2 = std::get<1>(pq2);
|
||||
|
||||
if(param_quality_check(preq1, preq2) == 1 &&
|
||||
std::get<0>(pq1) == std::get<0>(pq2))
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
else return 0;
|
||||
}
|
||||
|
||||
int postcond_topology_check(PostconditionT &pt1, PostconditionT &pt2){
|
||||
|
||||
auto pret1 = std::get<1>(pt1);
|
||||
auto pret2 = std::get<1>(pt2);
|
||||
|
||||
if(param_topology_check(pret1, pret2) == 1 &&
|
||||
std::get<0>(pt1) == std::get<0>(pt2))
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
else return 0;
|
||||
}
|
||||
|
||||
int exploit_check(Exploit &exp1, Exploit &exp2){
|
||||
int base_corr = 0;
|
||||
|
||||
//Precondition counters
|
||||
int pre_qual_corr = 0;
|
||||
int pre_qual_count = 0;
|
||||
int pre_topo_corr = 0;
|
||||
int pre_topo_count = 0;
|
||||
|
||||
//Postcondition counters
|
||||
int post_qual_corr = 0;
|
||||
int post_qual_count = 0;
|
||||
int post_topo_corr = 0;
|
||||
int post_topo_count = 0;
|
||||
|
||||
//Base member check correctness
|
||||
if (exp1.get_id() == exp2.get_id() &&
|
||||
exp1.get_name() == exp2.get_name() &&
|
||||
exp1.get_group() == exp2.get_group() &&
|
||||
exp1.get_num_params() == exp2.get_num_params())
|
||||
{
|
||||
base_corr = 1;
|
||||
}
|
||||
|
||||
//Init lists for pre and post conditions for quals and topos
|
||||
auto exp1_pre_quals = exp1.precond_list_q();
|
||||
auto exp2_pre_quals = exp2.precond_list_q();
|
||||
|
||||
auto exp1_pre_topos = exp1.precond_list_t();
|
||||
auto exp2_pre_topos = exp2.precond_list_t();
|
||||
|
||||
auto exp1_post_quals = exp1.postcond_list_q();
|
||||
auto exp2_post_quals = exp2.postcond_list_q();
|
||||
|
||||
auto exp1_post_topos = exp1.postcond_list_t();
|
||||
auto exp2_post_topos = exp2.postcond_list_t();
|
||||
|
||||
//Init iterators for preconds
|
||||
auto itq1 = exp1_pre_quals.begin();
|
||||
auto itq2 = exp2_pre_quals.begin();
|
||||
|
||||
auto itt1 = exp1_pre_topos.begin();
|
||||
auto itt2 = exp2_pre_topos.begin();
|
||||
|
||||
//Check Precondition Qualities
|
||||
while(itq1 != exp1_pre_quals.end() || itq2 != exp2_pre_quals.end())
|
||||
{
|
||||
pre_qual_corr += param_quality_check(*itq1, *itq2);
|
||||
pre_qual_count++;
|
||||
|
||||
if(itq1 != exp1_pre_quals.end())
|
||||
++itq1;
|
||||
|
||||
if(itq2 != exp2_pre_quals.end())
|
||||
++itq2;
|
||||
}
|
||||
|
||||
//Check Precondition Topologies
|
||||
while(itt1 != exp1_pre_topos.end() || itt2 != exp2_pre_topos.end())
|
||||
{
|
||||
pre_topo_corr += param_topology_check(*itt1, *itt2);
|
||||
pre_topo_count++;
|
||||
|
||||
if(itt1 != exp1_pre_topos.end())
|
||||
++itt1;
|
||||
|
||||
if(itt2 != exp2_pre_topos.end())
|
||||
++itt2;
|
||||
}
|
||||
|
||||
//Init iterators for postconds
|
||||
auto itpq1 = exp1_post_quals.begin();
|
||||
auto itpq2 = exp2_post_quals.begin();
|
||||
|
||||
auto itpt1 = exp1_post_topos.begin();
|
||||
auto itpt2 = exp2_post_topos.begin();
|
||||
|
||||
//Check Postcondition Qualities
|
||||
while(itpq1 != exp1_post_quals.end() || itpq2 != exp2_post_quals.end())
|
||||
{
|
||||
post_qual_corr += postcond_quality_check(*itpq1, *itpq2);
|
||||
post_qual_count++;
|
||||
|
||||
if(itpq1 != exp1_post_quals.end())
|
||||
++itpq1;
|
||||
|
||||
if(itpq2 != exp2_post_quals.end())
|
||||
++itpq2;
|
||||
}
|
||||
|
||||
//Check Postcondition Topologies
|
||||
while(itpt1 != exp1_post_topos.end() || itpt2 != exp2_post_topos.end())
|
||||
{
|
||||
post_topo_corr += postcond_topology_check(*itpt1, *itpt2);
|
||||
post_topo_count++;
|
||||
|
||||
if(itpt1 != exp1_post_topos.end())
|
||||
++itpt1;
|
||||
|
||||
if(itpt2 != exp2_post_topos.end())
|
||||
++itpt2;
|
||||
}
|
||||
|
||||
if( base_corr == 1 &&
|
||||
pre_qual_count == pre_qual_corr &&
|
||||
pre_topo_count == pre_topo_corr &&
|
||||
post_qual_count == post_qual_corr &&
|
||||
post_topo_count == post_topo_corr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
else return 0;
|
||||
}
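Editor's note: the precondition/postcondition loops above advance two iterators with || so that unequal lengths still terminate, and tally matches into counters. A more compact way to express the same pairwise check is a size test plus std::equal with a predicate; the generic helper below is an editorial sketch, not code from this commit, and keeps the 0/1 return convention of quality_check and friends.

#include <algorithm>
#include <vector>

// Returns 1 when both sequences have the same length and every aligned pair
// passes the element-level check (itself returning 0 or 1), else returns 0.
template <typename Seq, typename ElemCheck>
int sequence_check(const Seq &a, const Seq &b, ElemCheck check) {
    if (a.size() != b.size()) return 0;
    return std::equal(a.begin(), a.end(), b.begin(),
                      [&](const auto &x, const auto &y) { return check(x, y) == 1; })
           ? 1 : 0;
}

// Example with ints standing in for the condition structs:
//   std::vector<int> u{1, 2, 3}, v{1, 2, 3};
//   int ok = sequence_check(u, v, [](const int &x, const int &y) { return x == y ? 1 : 0; });  // ok == 1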
|
||||
|
||||
void save_topology(const Topology &t, const char * filename){
|
||||
std::ofstream ofs(filename);
|
||||
boost::archive::text_oarchive oa(ofs);
|
||||
@@ -90,6 +259,59 @@ int topology_check(Topology &t1, Topology &t2){
|
||||
else return 0;
|
||||
}
|
||||
|
||||
int ag_check(AssetGroup &ag1, AssetGroup &ag2){
|
||||
int qual_count = 0;
|
||||
int qual_corr = 0;
|
||||
int topo_count = 0;
|
||||
int topo_corr = 0;
|
||||
|
||||
auto ag1_quals = ag1.get_hypo_quals();
|
||||
auto ag2_quals = ag2.get_hypo_quals();
|
||||
|
||||
auto ag1_topos = ag1.get_hypo_topos();
|
||||
auto ag2_topos = ag2.get_hypo_topos();
|
||||
|
||||
auto itq1 = ag1_quals.begin();
|
||||
auto itq2 = ag2_quals.begin();
|
||||
|
||||
auto itt1 = ag1_topos.begin();
|
||||
auto itt2 = ag2_topos.begin();
|
||||
|
||||
while(itq1 != ag1_quals.end() || itq2 != ag2_quals.end())
|
||||
{
|
||||
qual_corr += quality_check(*itq1, *itq2);
|
||||
qual_count++;
|
||||
|
||||
if(itq1 != ag1_quals.end())
|
||||
++itq1;
|
||||
|
||||
if(itq2 != ag2_quals.end())
|
||||
++itq2;
|
||||
}
|
||||
|
||||
while(itt1 != ag1_topos.end() || itt2 != ag2_topos.end())
|
||||
{
|
||||
topo_corr += topology_check(*itt1, *itt2);
|
||||
topo_count++;
|
||||
|
||||
if(itt1 != ag1_topos.end())
|
||||
++itt1;
|
||||
|
||||
if(itt2 != ag2_topos.end())
|
||||
++itt2;
|
||||
}
|
||||
|
||||
if(ag1.get_perm() == ag2.get_perm() &&
|
||||
qual_count == qual_corr &&
|
||||
topo_count == topo_corr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
else return 0;
|
||||
|
||||
}
|
||||
|
||||
void save_factbase(const Factbase &fb, const char * filename){
|
||||
std::ofstream ofs(filename);
|
||||
boost::archive::text_oarchive oa(ofs);
|
||||
@@ -196,9 +418,10 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
if(world.rank() != 0)
|
||||
world.send(0, 0, rollcall);
|
||||
else{
|
||||
std::cout << rollcall << std::endl;
|
||||
for(int i = 0; i < world.size()-1; i++){
|
||||
world.recv(mpi::any_source, 0, str_host);
|
||||
std::cout << str_host << std::endl;
|
||||
world.recv(mpi::any_source, 0, rollcall);
|
||||
std::cout << rollcall << std::endl;
|
||||
}
|
||||
std::cout << "" << std::endl;
|
||||
}
|
||||
@@ -220,8 +443,40 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
int fb_corr = 0;
|
||||
int ns_count = 0;
|
||||
int ns_corr = 0;
|
||||
int ag_count = 0;
|
||||
int ag_corr = 0;
|
||||
int exp_corr = 0;
|
||||
int exp_count = 0;
|
||||
|
||||
std::vector<AssetGroup> asset_groups;
|
||||
std::vector<Quality> asset_group_quals;
|
||||
std::vector<Topology> asset_group_topos;
|
||||
|
||||
|
||||
if(world.rank() == 0){
|
||||
std::cout << "Performing Unit Testing on Exploit Serialization." << std::endl;
|
||||
}
|
||||
|
||||
for (auto exp : instance.exploits)
|
||||
{
|
||||
Exploit new_exp;
|
||||
if(world.rank() == 0)
|
||||
new_exp = exp;
|
||||
|
||||
mpi::request req;
|
||||
|
||||
broadcast(world, new_exp, 0);
|
||||
exp_count++;
|
||||
exp_corr += exploit_check(exp, new_exp);
|
||||
}
|
||||
int total_exp_corr;
|
||||
|
||||
reduce(world, exp_corr, total_exp_corr, std::plus<int>(), 0);
|
||||
|
||||
if (world.rank() == 0){
|
||||
std::cout << "Exploit Unit Testing: " << std::to_string(total_exp_corr) << "/" << std::to_string(world.size() * exp_count) << std::endl;
|
||||
printf("\n");
|
||||
|
||||
std::cout << "Performing Unit Testing on Quality Serialization." << std::endl;
|
||||
}
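Editor's note: the exploit test above is the template for every type in this file: rank 0 broadcasts its copy, each rank compares what arrived against its own local copy, and the per-rank match counts are summed onto rank 0 with reduce. A stripped-down sketch of that round trip using a plain std::string payload in place of Exploit (assumes boost_mpi and boost_serialization are linked, as in this project's build flags):

#include <functional>
#include <iostream>
#include <string>
#include <boost/mpi.hpp>
#include <boost/mpi/collectives.hpp>
#include <boost/serialization/string.hpp>

namespace mpi = boost::mpi;

int main(int argc, char *argv[]) {
    mpi::environment env(argc, argv);
    mpi::communicator world;

    const std::string original = "exploit-42";    // stand-in for a serialized Exploit
    std::string received;
    if (world.rank() == 0) received = original;

    mpi::broadcast(world, received, 0);            // serialize on rank 0, fan out to all ranks
    int correct = (received == original) ? 1 : 0;  // every rank verifies its copy

    int total = 0;
    mpi::reduce(world, correct, total, std::plus<int>(), 0);   // sum match counts on rank 0
    if (world.rank() == 0)
        std::cout << total << "/" << world.size() << " ranks matched" << std::endl;
    return 0;
}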
|
||||
|
||||
@@ -236,6 +491,8 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
broadcast(world, new_qual, 0);
|
||||
qual_count++;
|
||||
qual_corr += quality_check(qual, new_qual);
|
||||
|
||||
asset_group_quals.emplace_back(qual);
|
||||
}
|
||||
|
||||
int total_qual_corr;
|
||||
@@ -260,8 +517,13 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
broadcast(world, new_top, 0);
|
||||
top_count++;
|
||||
top_corr += topology_check(topo, new_top);
|
||||
|
||||
asset_group_topos.emplace_back(topo);
|
||||
}
|
||||
|
||||
std::vector<size_t> perm;
|
||||
perm.emplace_back(2);
|
||||
|
||||
int total_top_corr;
|
||||
|
||||
reduce(world, top_corr, total_top_corr, std::plus<int>(), 0);
|
||||
@@ -269,7 +531,37 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
if (world.rank() == 0){
|
||||
std::cout << "Topology Unit Testing: " << std::to_string(total_top_corr) << "/" << std::to_string(world.size() * top_count) << std::endl;
|
||||
printf("\n");
|
||||
|
||||
|
||||
std::cout << "Performing Unit Testing on AssetGroup Serialization." << std::endl;
|
||||
}
|
||||
|
||||
int total_ag_corr;
|
||||
std::vector<size_t> vec;
|
||||
vec.resize(2);
|
||||
std::fill(vec.begin(),vec.end(),0);
|
||||
|
||||
AssetGroup default_ag = AssetGroup(asset_group_quals, asset_group_topos, vec);
|
||||
|
||||
for (int i = 0; i < 3; i++)
|
||||
{
|
||||
AssetGroup new_ag;
|
||||
if(world.rank() == 0){
|
||||
new_ag = AssetGroup(asset_group_quals, asset_group_topos, vec);
|
||||
}
|
||||
|
||||
mpi::request req;
|
||||
|
||||
broadcast(world, new_ag, 0);
|
||||
ag_count++;
|
||||
ag_corr += ag_check(new_ag, default_ag);
|
||||
}
|
||||
|
||||
reduce(world, ag_corr, total_ag_corr, std::plus<int>(), 0);
|
||||
|
||||
if (world.rank() == 0){
|
||||
std::cout << "AssetGroup Unit Testing: " << std::to_string(total_ag_corr) << "/" << std::to_string(world.size() * ag_count) << std::endl;
|
||||
printf("\n");
|
||||
|
||||
std::cout << "Performing Unit Testing on Factbase Serialization." << std::endl;
|
||||
}
|
||||
|
||||
@@ -327,6 +619,15 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
}
|
||||
|
||||
if(world.rank() == 0){
|
||||
|
||||
if(total_exp_corr == world.size() * exp_count)
|
||||
{
|
||||
std::cout << "100% Success Rate for Exploit Serialization." << std::endl;
|
||||
}
|
||||
else{
|
||||
std::cout << "Errors occurred in the Exploit Serialization." << std::endl;
|
||||
e_flag = 1;
|
||||
}
|
||||
|
||||
if(total_qual_corr == world.size() * qual_count)
|
||||
{
|
||||
@@ -337,7 +638,6 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
e_flag = 1;
|
||||
}
|
||||
|
||||
|
||||
if(total_top_corr == world.size() * top_count)
|
||||
{
|
||||
std::cout << "100% Success Rate for Topology Serialization." << std::endl;
|
||||
@@ -347,6 +647,15 @@ void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicato
|
||||
e_flag = 1;
|
||||
}
|
||||
|
||||
if(total_ag_corr == world.size() * ag_count)
|
||||
{
|
||||
std::cout << "100% Success Rate for AssetGroup Serialization." << std::endl;
|
||||
}
|
||||
else{
|
||||
std::cout << "Errors occurred in the AssetGroup Serialization." << std::endl;
|
||||
e_flag = 1;
|
||||
}
|
||||
|
||||
if(total_fb_corr == world.size() * fb_count)
|
||||
{
|
||||
std::cout << "100% Success Rate for Factbase Serialization." << std::endl;
|
||||
|
||||
@@ -3,11 +3,19 @@
|
||||
|
||||
void save_quality(const Quality &q, const char * filename);
|
||||
void restore_quality(Quality &q, const char * filename);
|
||||
int quality_check(Quality &q1, Quality &q2);
|
||||
|
||||
void save_topology(const Topology &t, const char * filename);
|
||||
void restore_topology(Topology &t, const char * filename);
|
||||
|
||||
int quality_check(Quality &q1, Quality &q2);
|
||||
int ag_check(AssetGroup &ag1, AssetGroup &ag2);
|
||||
int factbase_check(Factbase &fb1, Factbase &fb2);
|
||||
int topology_check(Topology &t1, Topology &t2);
|
||||
int network_state_check(NetworkState &ns1, NetworkState &ns2);
|
||||
|
||||
int param_quality_check(ParameterizedQuality &pq1, ParameterizedQuality &pq2);
|
||||
int param_topology_check(ParameterizedTopology &pt1, ParameterizedTopology &pt2);
|
||||
int postcond_quality_check(PostconditionQ &pq1, PostconditionQ &pq2);
|
||||
int postcond_topology_check(PostconditionT &pt1, PostconditionT &pt2);
|
||||
|
||||
void serialization_unit_testing(AGGenInstance &instance, boost::mpi::communicator &world);
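Editor's note: the save_*/restore_* helpers declared above all follow the same Boost text-archive round trip that save_topology and save_factbase show earlier in this file. A minimal generic sketch of that idiom; the template helpers and file handling below are placeholders, not part of this header.

#include <fstream>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>

// T must be Boost-serializable (a serialize() member or free function), as
// Quality, Topology and Factbase are elsewhere in this commit.
template <typename T>
void save_to_file(const T &obj, const char *filename) {
    std::ofstream ofs(filename);
    boost::archive::text_oarchive oa(ofs);
    oa << obj;                        // write a text representation of obj
}

template <typename T>
void restore_from_file(T &obj, const char *filename) {
    std::ifstream ifs(filename);
    boost::archive::text_iarchive ia(ifs);
    ia >> obj;                        // rebuild obj from the archived text
}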
|
||||
|
||||
|
||||
@@ -1,12 +1,39 @@
|
||||
#include <iostream>
|
||||
#include <sys/time.h>
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <tuple>
|
||||
#include <unordered_map>
|
||||
#include <omp.h>
|
||||
#include <sys/time.h>
|
||||
#include <string.h>
|
||||
#include <map>
|
||||
|
||||
#include <boost/mpi/collectives.hpp>
|
||||
#include <boost/archive/tmpdir.hpp>
|
||||
#include <boost/archive/text_iarchive.hpp>
|
||||
#include <boost/archive/text_oarchive.hpp>
|
||||
|
||||
#include <boost/serialization/base_object.hpp>
|
||||
#include <boost/serialization/utility.hpp>
|
||||
#include <boost/serialization/list.hpp>
|
||||
#include <boost/serialization/assume_abstract.hpp>
|
||||
#include <boost/serialization/string.hpp>
|
||||
#include <boost/serialization/vector.hpp>
|
||||
|
||||
#include <boost/mpi.hpp>
|
||||
#include <boost/mpi/environment.hpp>
|
||||
#include <boost/mpi/communicator.hpp>
|
||||
#include <boost/mpi/collectives.hpp>
|
||||
#include <boost/serialization/is_bitwise_serializable.hpp>
|
||||
|
||||
#include "../util/db_functions.h"
|
||||
#include "../util/avail_mem.h"
|
||||
#include "../util/odometer.h"
|
||||
#include "serialize_tuple.h"
|
||||
|
||||
#include "serialize_tuple.h"
|
||||
#include "../ag_gen/ag_gen.h"
|
||||
#include "tasks.h"
|
||||
|
||||
void task_zero(AGGenInstance &instance, std::deque<NetworkState> &localFrontier, double mem_threshold)
|
||||
{
|
||||
@@ -30,91 +57,88 @@ void task_zero(AGGenInstance &instance, std::deque<NetworkState> &localFrontier,
|
||||
//Leave a 30% buffer in alpha
|
||||
while((f_alpha <= (mem_threshold * 0.7)) && !unex_empty());
|
||||
|
||||
std::cout << "Retrieved " << retrv_counter << " factbases from the database." << std::endl;
|
||||
//std::cout << "Retrieved " << retrv_counter << " factbases from the database." << std::endl;
|
||||
gettimeofday(&tt2,NULL);
|
||||
total_tt+=(tt2.tv_sec-tt1.tv_sec)*1000.0+(tt2.tv_usec-tt1.tv_usec)/1000.0;
|
||||
//printf("Retrieving from db took %lf s.\n", total_tt);
|
||||
}
|
||||
|
||||
//TODO: These nodes need an updated instance
|
||||
void task_one(AGGenInstance &instance, NetworkState &current_state,\
|
||||
std::vector<Exploit> &exploit_list, std::unordered_map<size_t, PermSet<size_t>> &od_map,\
|
||||
int alloc, int two_alloc, int reduc_factor, int num_tasks, boost::mpi::communicator &world){
|
||||
|
||||
boost::mpi::communicator tcomm = world.split(world.rank() > 0 && world.rank() <= alloc);
|
||||
int alloc, int two_alloc, int reduc_factor, int num_tasks, boost::mpi::communicator &world,\
|
||||
boost::mpi::communicator &tcomm){
|
||||
|
||||
//std::cout << "Process rank " << world.rank() << " with " << alloc << " node(s) allocated has started Task 1." << std::endl;
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> appl_exploits;
|
||||
unsigned long esize = exploit_list.size();
|
||||
|
||||
//Distribute work to all nodes
|
||||
for (size_t i = 0; i < esize; i++) {//for loop for applicable exploits starts
|
||||
if (i % alloc != world.rank()+1)
|
||||
continue;
|
||||
auto e = exploit_list.at(i);
|
||||
size_t num_params = e.get_num_params();
|
||||
auto preconds_q = e.precond_list_q();
|
||||
auto preconds_t = e.precond_list_t();
|
||||
auto perms = od_map[num_params];
|
||||
std::vector<AssetGroup> asset_groups;
|
||||
for (auto perm : perms) {
|
||||
std::vector<Quality> asset_group_quals;
|
||||
std::vector<Topology> asset_group_topos;
|
||||
asset_group_quals.reserve(preconds_q.size());
|
||||
asset_group_topos.reserve(preconds_t.size());
|
||||
//Distribute work to all nodes
|
||||
for (size_t i = 0; i < esize; i++) {//for loop for applicable exploits starts
|
||||
if (i % alloc != send_check(world, world.rank()))
|
||||
continue;
|
||||
auto e = exploit_list.at(i);
|
||||
size_t num_params = e.get_num_params();
|
||||
auto preconds_q = e.precond_list_q();
|
||||
auto preconds_t = e.precond_list_t();
|
||||
auto perms = od_map[num_params];
|
||||
std::vector<AssetGroup> asset_groups;
|
||||
for (auto perm : perms) {
|
||||
std::vector<Quality> asset_group_quals;
|
||||
std::vector<Topology> asset_group_topos;
|
||||
asset_group_quals.reserve(preconds_q.size());
|
||||
asset_group_topos.reserve(preconds_t.size());
|
||||
|
||||
|
||||
//std::vector<int>::size_type sz;
|
||||
//sz=asset_group_quals.capacity();
|
||||
for (auto &precond : preconds_q) {
|
||||
|
||||
//Old quality encode caused this to crash
|
||||
asset_group_quals.emplace_back(
|
||||
perm[precond.get_param_num()], precond.name, precond.op,
|
||||
precond.value, instance.facts);
|
||||
}
|
||||
for (auto &precond : preconds_t) {
|
||||
auto dir = precond.get_dir();
|
||||
auto prop = precond.get_property();
|
||||
auto op = precond.get_operation();
|
||||
auto val = precond.get_value();
|
||||
asset_group_topos.emplace_back(
|
||||
perm[precond.get_from_param()],
|
||||
perm[precond.get_to_param()], dir, prop, op, val, instance.facts);
|
||||
}
|
||||
asset_groups.emplace_back(asset_group_quals, asset_group_topos,
|
||||
perm);
|
||||
for (auto &precond : preconds_q) {
|
||||
|
||||
asset_group_quals.emplace_back(
|
||||
perm[precond.get_param_num()], precond.name, precond.op,
|
||||
precond.value, instance.facts);
|
||||
}
|
||||
auto assetgroup_size = asset_groups.size();
|
||||
for (size_t j = 0; j < assetgroup_size; j++) {
|
||||
auto asset_group = asset_groups.at(j);
|
||||
for (auto &quality : asset_group.get_hypo_quals()) {
|
||||
if (!current_state.get_factbase().find_quality(quality)) {
|
||||
goto LOOPCONTINUE1;
|
||||
}
|
||||
}
|
||||
for (auto &topology : asset_group.get_hypo_topos()) {
|
||||
if (!current_state.get_factbase().find_topology(topology)) {
|
||||
goto LOOPCONTINUE1;
|
||||
}
|
||||
}
|
||||
{
|
||||
auto new_appl_exploit = std::make_tuple(e, asset_group);
|
||||
appl_exploits.push_back(new_appl_exploit);
|
||||
}
|
||||
LOOPCONTINUE1:;
|
||||
for (auto &precond : preconds_t) {
|
||||
auto dir = precond.get_dir();
|
||||
auto prop = precond.get_property();
|
||||
auto op = precond.get_operation();
|
||||
auto val = precond.get_value();
|
||||
asset_group_topos.emplace_back(
|
||||
perm[precond.get_from_param()],
|
||||
perm[precond.get_to_param()], dir, prop, op, val, instance.facts);
|
||||
}
|
||||
} //for loop for applicable exploits ends
|
||||
asset_groups.emplace_back(asset_group_quals, asset_group_topos,
|
||||
perm);
|
||||
}
|
||||
auto assetgroup_size = asset_groups.size();
|
||||
for (size_t j = 0; j < assetgroup_size; j++) {
|
||||
auto asset_group = asset_groups.at(j);
|
||||
|
||||
for (auto &quality : asset_group.get_hypo_quals()) {
|
||||
if (!current_state.get_factbase().find_quality(quality)) {
|
||||
goto LOOPCONTINUE1;
|
||||
}
|
||||
}
|
||||
for (auto &topology : asset_group.get_hypo_topos()) {
|
||||
if (!current_state.get_factbase().find_topology(topology)) {
|
||||
goto LOOPCONTINUE1;
|
||||
}
|
||||
}
|
||||
{
|
||||
auto new_appl_exploit = std::make_tuple(e, asset_group);
|
||||
appl_exploits.push_back(new_appl_exploit);
|
||||
}
|
||||
LOOPCONTINUE1:;
|
||||
}
|
||||
} //for loop for applicable exploits ends
|
||||
|
||||
//Fewer nodes allocated to task 2 than task 1.
|
||||
//Distribute the appl_exploit list from the extra node in task 1 to all other nodes in this task
|
||||
if (two_alloc < alloc){
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> partial_appl_exploits;
|
||||
mpi::scatter(local, appl_exploits, partial_appl_exploits, world.rank()==alloc);
|
||||
mpi::scatter(tcomm, &appl_exploits, partial_appl_exploits, alloc);
|
||||
|
||||
if(world.rank() < alloc){
|
||||
for(auto itr=partial_appl_exploits.begin(); itr!=partial_appl_exploits.end(); itr++){
|
||||
auto index_r=std::distance(partial_appl_exploits.begin(),itr);
|
||||
appl_exploits.push_back(partial_appl_exploits.at(index_r));
|
||||
auto index_r=std::distance(partial_appl_exploits.begin(),itr);
|
||||
appl_exploits.push_back(partial_appl_exploits.at(index_r));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,182 +147,276 @@ void task_one(AGGenInstance &instance, NetworkState &current_state,\
|
||||
if (two_alloc < alloc)
|
||||
skip_greatest = 1;
|
||||
if(world.rank() <= alloc - skip_greatest){
|
||||
world.isend(world.rank() + alloc, 0, appl_exploits);
|
||||
world.isend(world.rank() + alloc, 0, current_state);
|
||||
world.isend(send_check(world, world.rank() + alloc -1), 30, appl_exploits);
|
||||
world.isend(send_check(world, world.rank() + alloc -1), 0, current_state);
|
||||
}
|
||||
}
|
||||
|
||||
//Note: This means that these nodes also need to update their instance!!
|
||||
void task_two(AGGenInstance &instance)
|
||||
{
|
||||
NetworkState{current_state};
|
||||
void task_two(AGGenInstance &instance, int alloc, int two_alloc, boost::mpi::communicator &world,\
|
||||
std::deque<NetworkState> &localFrontier, double mem_threshold, boost::mpi::communicator &ttwo_comm,\
|
||||
std::vector<std::string> ex_groups, std::unordered_map<size_t, int> &hash_map){
|
||||
|
||||
NetworkState current_state;
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> appl_exploits;
|
||||
world.irecv(mpi::any_source, 0, current_state);
|
||||
world.irecv(mpi::any_source, 0, appl_exploits);
|
||||
|
||||
world.recv(mpi::any_source, 30, appl_exploits);
|
||||
world.recv(mpi::any_source, 0, current_state);
|
||||
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> partial_appl_exploits;
|
||||
if(ttwo_comm.size() > 1)
|
||||
mpi::scatter(ttwo_comm, &partial_appl_exploits, appl_exploits, 0);
|
||||
|
||||
auto current_hash = current_state.get_hash(instance.facts);
|
||||
auto appl_expl_size = appl_exploits.size();
|
||||
|
||||
//Sync Fire work with lowest rank node
|
||||
//if world.rank() == lowest
|
||||
std::map<std::string, int> group_fired; //Map to hold fired status per group
|
||||
std::map<std::string, std::vector<std::tuple<Exploit, AssetGroup>>> sync_vectors; //Map to hold all group exploits
|
||||
|
||||
//skip flag is used to ensure that the egroup loop is not repeatedly run more than necessary
|
||||
int skip_flag=0;
|
||||
for (auto map_group : ex_groups)
|
||||
{
|
||||
group_fired.insert(std::pair<std::string, int> (map_group, 0));
|
||||
}
|
||||
|
||||
//vector for holding the appl_exploits indices at which groups exist
|
||||
std::vector<int> idr_idx;
|
||||
std::string egroup;
|
||||
|
||||
//vector for holding indices that have already fired
|
||||
std::vector<int> fired_idx;
|
||||
//Build up the map of synchronous fire exploits and send to 0th node of task 2
|
||||
for(auto itr=appl_exploits.begin(); itr!=appl_exploits.end(); itr++){
|
||||
//auto e = appl_exploits.at(itr);
|
||||
|
||||
//iterator for the applicable exploits vector
|
||||
auto itr=appl_exploits.begin();
|
||||
auto e = *itr;
|
||||
egroup = std::get<0>(e).get_group();
|
||||
|
||||
int break_flag=0;
|
||||
int testing_flag=0;
|
||||
if (egroup != "null"){
|
||||
sync_vectors[egroup].push_back(e);
|
||||
}
|
||||
}
|
||||
|
||||
if(ttwo_comm.rank() != 0){
|
||||
for (auto map_group : ex_groups){
|
||||
ttwo_comm.isend(0, 6, sync_vectors[egroup]);
|
||||
}
|
||||
for(auto itr = appl_exploits.begin(); itr != appl_exploits.end();){
|
||||
egroup = std::get<0>(*itr).get_group();
|
||||
if (egroup == "null"){
|
||||
itr = appl_exploits.erase(itr);
|
||||
}
|
||||
else{
|
||||
itr++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(ttwo_comm.rank() == 0 && ttwo_comm.size() > 1){
|
||||
for(auto itr = appl_exploits.begin(); itr != appl_exploits.end();){
|
||||
itr = appl_exploits.erase(itr);
|
||||
}
|
||||
for (int r = 0; r < ex_groups.size() * (ttwo_comm.size()-1); r++){
|
||||
ttwo_comm.irecv(mpi::any_source, 6, partial_appl_exploits);
|
||||
for(auto itr=partial_appl_exploits.begin(); itr!=partial_appl_exploits.end(); itr++){
|
||||
auto index_r=std::distance(partial_appl_exploits.begin(),itr);
|
||||
appl_exploits.push_back(partial_appl_exploits.at(index_r));
|
||||
}
|
||||
}
|
||||
}
|
||||
//loop through the vector
|
||||
for(auto itr=appl_exploits.begin(); itr!=appl_exploits.end(); itr++){
|
||||
//keep track of index for later use
|
||||
auto index=std::distance(appl_exploits.begin(), itr);
|
||||
//reset break flag
|
||||
break_flag=0;
|
||||
for(auto itr=appl_exploits.begin(); itr!=appl_exploits.end(); itr++){
|
||||
|
||||
//To avoid double-fire, check if an index has already been run.
|
||||
//If it has, then there is no need to run through this loop again.
|
||||
for(auto itr_f=fired_idx.begin(); itr_f!=fired_idx.end(); itr_f++){
|
||||
auto index_f=std::distance(fired_idx.begin(),itr_f);
|
||||
if(index==index_f)
|
||||
break_flag=1;
|
||||
}
|
||||
|
||||
if (break_flag==1)
|
||||
break;
|
||||
|
||||
//empty the appl_exploits index vector at the start of each loop so that
|
||||
//it doesn't contain stale data from a previous loop
|
||||
idr_idx.clear();
|
||||
|
||||
NetworkState new_state{current_state};
|
||||
//auto e = appl_exploits.at(j);
|
||||
|
||||
/* Synchronous fire function
|
||||
|
||||
First: double/sanity checks to see if there are other exploits that need to be fired
|
||||
This also prevents the firing from occurring when it shouldn't via a regular passthrough
|
||||
After popping, it checks if the vector is empty. If it is, then we no longer need to
|
||||
re-fill the vector since we've gone through all possibilities
|
||||
*/
|
||||
SYNCH_FIRE:;
|
||||
if(!idr_idx.empty()){
|
||||
//std::cout<<"IDR Size " << idr_idx.size()<<std::endl;
|
||||
index=idr_idx.back();
|
||||
idr_idx.pop_back();
|
||||
if(idr_idx.empty())
|
||||
skip_flag=1;
|
||||
fired_idx.push_back(index);
|
||||
}
|
||||
|
||||
|
||||
auto e = appl_exploits.at(index);
|
||||
auto e = *itr;
|
||||
auto exploit = std::get<0>(e);
|
||||
//std::cout<<exploit.get_name()<<std::endl;
|
||||
auto assetGroup = std::get<1>(e);
|
||||
|
||||
//For synchronous firing: get indices of all exploits in the same group and
|
||||
//push them onto the index vector for later use
|
||||
auto egroup=exploit.get_group();
|
||||
|
||||
if (egroup!="null" && idr_idx.empty() && skip_flag==0){
|
||||
for(int i=0; i!=appl_exploits.size(); i++){
|
||||
if((std::get<0>(appl_exploits.at(i))).get_group()==egroup && i!=index){
|
||||
idr_idx.emplace_back(i);
|
||||
egroup=exploit.get_group();
|
||||
|
||||
if ((egroup != "null" && group_fired[egroup] == 0) || egroup == "null"){
|
||||
NetworkState new_state{current_state};
|
||||
std::vector<std::tuple<Exploit, AssetGroup>> sync_exploits;
|
||||
|
||||
if (egroup == "null")
|
||||
sync_exploits.push_back(e);
|
||||
|
||||
else {
|
||||
sync_exploits = sync_vectors[egroup];
|
||||
|
||||
//TODO: Does not work if only some assets belong to a group. This only works if
|
||||
//all assets are in the group
|
||||
if(sync_exploits.size() < instance.assets.size()){
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
//TODO: If there are other assets in group,
|
||||
//but you check idr_idx after filling and it's still empty
|
||||
//you know that the other asset isn't ready to be fired yet, so wait.
|
||||
//THIS BREAKS CODE IF ONLY 1 ASSET IN GROUP EXPLOIT. NEED TO FIGURE OUT HOW TO SEE HOW MANY ASSETS ARE IN GROUP
|
||||
//std::cout<<std::get<1>(e).size()<<std::endl;
|
||||
//if(std::get<1>(e).size()>1){
|
||||
if(idr_idx.empty()){
|
||||
testing_flag=1;
|
||||
}
|
||||
// }
|
||||
}
|
||||
if(testing_flag==1)
|
||||
break;
|
||||
skip_flag=0;
|
||||
auto assetGroup = std::get<1>(e);
|
||||
//assetGroup.print_group();
|
||||
//std::cout<<std::endl;
|
||||
auto postconditions = createPostConditions(e, instance.facts);
|
||||
auto qualities = std::get<0>(postconditions);
|
||||
auto topologies = std::get<1>(postconditions);
|
||||
|
||||
for(auto &qual : qualities) {
|
||||
auto action = std::get<0>(qual);
|
||||
auto fact = std::get<1>(qual);
|
||||
switch(action) {
|
||||
case ADD_T:
|
||||
new_state.add_quality(fact);
|
||||
break;
|
||||
case UPDATE_T:
|
||||
new_state.update_quality(fact);
|
||||
|
||||
//TODO: if fact!= "="" call new_state function, passing fact and instance.facts. Update the quality, and insert it into the hash_table instead of this convoluted mess
|
||||
if(fact.get_op()=="+="){
|
||||
|
||||
//std::cout<<" AFTER UPDATE "<<new_state.compound_assign(fact)<<std::endl;
|
||||
std::unordered_map<std::string,int>::const_iterator got = instance.facts.hash_table.find(new_state.compound_assign(fact));
|
||||
for(auto sync_itr=sync_exploits.begin(); sync_itr!=sync_exploits.end(); sync_itr++){
|
||||
e = *sync_itr;
|
||||
exploit = std::get<0>(e);
|
||||
egroup=exploit.get_group();
|
||||
assetGroup = std::get<1>(e);
|
||||
group_fired[egroup] = 1;
|
||||
|
||||
//If the value is not already in the hash_table, insert it.
|
||||
//Since the compound operators include a value that is not in the original Keyvalue object, the unordered map does not include it
|
||||
//As a result, you have to manually add it.
|
||||
if(got==instance.facts.hash_table.end()){
|
||||
instance.facts.hash_table[new_state.compound_assign(fact)]=instance.facts.size();
|
||||
instance.facts.length++;
|
||||
instance.facts.str_vector.push_back(new_state.compound_assign(fact));
|
||||
auto postconditions = createPostConditions(e, instance.facts);
|
||||
auto qualities = std::get<0>(postconditions);
|
||||
auto topologies = std::get<1>(postconditions);
|
||||
|
||||
for(auto &qual : qualities) {
|
||||
auto action = std::get<0>(qual);
|
||||
auto fact = std::get<1>(qual);
|
||||
switch(action) {
|
||||
case ADD_T:
|
||||
new_state.add_quality(fact);
|
||||
break;
|
||||
case UPDATE_T:
|
||||
new_state.update_quality(fact);
|
||||
|
||||
//TODO: if fact!= "="" call new_state function, passing fact and instance.facts. Update the quality, and insert it into the hash_table instead of this convoluted mess
|
||||
if(fact.get_op()=="+="){
|
||||
|
||||
//std::cout<<" AFTER UPDATE "<<new_state.compound_assign(fact)<<std::endl;
|
||||
std::unordered_map<std::string,int>::const_iterator got = instance.facts.hash_table.find(new_state.compound_assign(fact));
|
||||
|
||||
//If the value is not already in the hash_table, insert it.
|
||||
//Since the compound operators include a value that is not in the original Keyvalue object, the unordered map does not include it
|
||||
//As a result, you have to manually add it.
|
||||
if(got==instance.facts.hash_table.end()){
|
||||
instance.facts.hash_table[new_state.compound_assign(fact)]=instance.facts.size();
|
||||
instance.facts.length++;
|
||||
instance.facts.str_vector.push_back(new_state.compound_assign(fact));
|
||||
for (int w = 0; w < world.size(); w++)
|
||||
{
|
||||
if(w != 1 + alloc && w > two_alloc)
|
||||
{
|
||||
world.isend(w, 3, new_state);
|
||||
world.isend(w, 4, fact);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
case DELETE_T:
|
||||
new_state.delete_quality(fact);
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case DELETE_T:
|
||||
new_state.delete_quality(fact);
|
||||
break;
|
||||
|
||||
for(auto &topo : topologies) {
|
||||
auto action = std::get<0>(topo);
|
||||
auto fact = std::get<1>(topo);
|
||||
switch(action) {
|
||||
case ADD_T:
|
||||
new_state.add_topology(fact);
|
||||
break;
|
||||
case UPDATE_T:
|
||||
new_state.update_topology(fact);
|
||||
break;
|
||||
case DELETE_T:
|
||||
new_state.delete_topology(fact);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}//Sync. Fire for
|
||||
|
||||
auto hash_num = new_state.get_hash(instance.facts);
|
||||
|
||||
if (hash_num == current_hash)
|
||||
continue;
|
||||
|
||||
//<6 Node Edge Case Prevention: Node 0 unable to execute task 3
|
||||
if(world.rank() != 0){
|
||||
world.isend(0, 5, new_state);
|
||||
world.isend(0, 6, current_state);
|
||||
world.isend(0, 10, exploit);
|
||||
world.isend(0, 11, assetGroup);
|
||||
}
|
||||
else {
|
||||
task_three(instance, new_state, localFrontier, mem_threshold, world,\
|
||||
two_alloc, current_state, exploit, assetGroup, hash_map);
|
||||
}
|
||||
}
|
||||
for(auto &topo : topologies) {
|
||||
auto action = std::get<0>(topo);
|
||||
auto fact = std::get<1>(topo);
|
||||
switch(action) {
|
||||
case ADD_T:
|
||||
new_state.add_topology(fact);
|
||||
break;
|
||||
case UPDATE_T:
|
||||
new_state.update_topology(fact);
|
||||
break;
|
||||
case DELETE_T:
|
||||
new_state.delete_topology(fact);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(!idr_idx.empty())
|
||||
goto SYNCH_FIRE;
|
||||
|
||||
auto hash_num = new_state.get_hash(instance.facts);
|
||||
|
||||
if (hash_num == current_hash)
|
||||
continue;
|
||||
//gettimeofday(&t1,NULL);
|
||||
|
||||
//CRITICAL IS IN HERE
|
||||
else
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void task_three(AGGenInstance &instance, NetworkState &new_state, std::deque<NetworkState> &localFrontier,\
|
||||
double mem_threshold, boost::mpi::communicator &world, int two_alloc, NetworkState &current_state,\
|
||||
Exploit &exploit, AssetGroup &assetGroup, std::unordered_map<size_t, int> &hash_map){
|
||||
|
||||
auto hash_num = new_state.get_hash(instance.facts);
|
||||
|
||||
//although local frontier is updated, the global hash is also updated to avoid testing on explored states.
|
||||
if (hash_map.find(hash_num) == hash_map.end()) {
|
||||
new_state.set_id();
|
||||
auto facts_tuple = new_state.get_factbase().get_facts_tuple();
|
||||
FactbaseItems new_items = std::make_tuple(facts_tuple, new_state.get_id());
|
||||
instance.factbase_items.push_back(new_items);
|
||||
instance.factbases.push_back(new_state.get_factbase());
|
||||
hash_map.insert(std::make_pair(new_state.get_hash(instance.facts), new_state.get_id()));
|
||||
|
||||
//See memory usage. If it exceeds the threshold, store new states in the DB
|
||||
double i_alpha = 0.0;
|
||||
//Get the most recent Factbase's size * total number of factbases, rough approximation of *2 to account for factbase_items
|
||||
double i_usage = instance.factbases.back().get_size() * instance.factbases.size() * 2 + sizeof(instance.edges[0]) * instance.edges.size();
|
||||
|
||||
auto tot_sys_mem = getTotalSystemMemory();
|
||||
i_alpha = i_usage/tot_sys_mem;
|
||||
double f_alpha;
|
||||
if (!localFrontier.empty())
|
||||
f_alpha = (static_cast<double>(localFrontier.size()) * (localFrontier.back().get_size()))/tot_sys_mem;
|
||||
else
|
||||
f_alpha = 0.0;
|
||||
|
||||
if (f_alpha >= (mem_threshold/2)) {
|
||||
//std::cout << "Frontier Alpha prior to database storing: " << f_alpha << std::endl;
|
||||
save_unexplored_to_db(new_state);
|
||||
if (!localFrontier.empty())
|
||||
f_alpha = (static_cast<double>(localFrontier.size()) * (localFrontier.back().get_size()))/tot_sys_mem;
|
||||
else
|
||||
f_alpha = 0;
|
||||
//std::cout << "Frontier Alpha after database storing: " << f_alpha << std::endl;
|
||||
}
|
||||
|
||||
//Store new state in database to ensure proper ordering of the FIFO queue
|
||||
else if (!unex_empty()){
|
||||
save_unexplored_to_db(new_state);
|
||||
}
|
||||
|
||||
//Otherwise, we can just store in memory
|
||||
else {
|
||||
localFrontier.emplace_front(new_state);
|
||||
}
|
||||
|
||||
if (i_alpha >= mem_threshold/2){
|
||||
//std::cout << "Instance Alpha prior to database storing: " << i_alpha << std::endl;
|
||||
world.isend(send_check(world, two_alloc), 7, instance.factbases);
|
||||
world.isend(send_check(world, two_alloc), 8, instance.edges);
|
||||
//save_ag_to_db(instance, true);
|
||||
|
||||
//Clear vectors and free memory
|
||||
std::vector<Factbase>().swap(instance.factbases);
|
||||
std::vector<FactbaseItems>().swap(instance.factbase_items);
|
||||
std::vector<Edge>().swap(instance.edges);
|
||||
|
||||
i_usage = (sizeof(instance.factbases) + (sizeof(instance.factbases[0]) * instance.factbases.size()) +\
|
||||
sizeof(instance.factbase_items) + (sizeof(instance.factbase_items[0]) * instance.factbase_items.size()) +\
|
||||
sizeof(instance.edges) + (sizeof(instance.edges[0]) * instance.edges.size()));
|
||||
i_alpha = i_usage/tot_sys_mem;
|
||||
//std::cout << "Instance Alpha after database storing: " << i_alpha << std::endl;
|
||||
|
||||
}
|
||||
|
||||
Edge ed(current_state.get_id(), new_state.get_id(), exploit, assetGroup);
|
||||
ed.set_id();
|
||||
instance.edges.push_back(ed);
|
||||
} //END if (hash_map.find(hash_num) == hash_map.end())
|
||||
|
||||
else {
|
||||
int id = hash_map[hash_num];
|
||||
Edge ed(current_state.get_id(), id, exploit, assetGroup);
|
||||
ed.set_id();
|
||||
instance.edges.push_back(ed);
|
||||
}
|
||||
}
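Editor's note: task_three decides between keeping a new state in the in-memory frontier and spilling it to the database by comparing a memory fraction (alpha) against half of mem_threshold. The helper below is an editorial distillation of just that test, assuming getTotalSystemMemory() reports bytes as in util/avail_mem.h; it is not the project's exact code.

#include <cstddef>

// frontier_bytes  - approximate footprint of the in-memory frontier
// total_mem_bytes - value reported by getTotalSystemMemory()
// mem_threshold   - user-supplied fraction; half of it is the spill point
inline bool should_spill_to_db(std::size_t frontier_bytes,
                               double total_mem_bytes,
                               double mem_threshold) {
    const double f_alpha = static_cast<double>(frontier_bytes) / total_mem_bytes;
    return f_alpha >= mem_threshold / 2.0;
}

// Example: a 6 GB frontier on a 16 GB node with mem_threshold 0.5 gives
// f_alpha = 0.375 >= 0.25, so the new state would be saved to the database.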
|
||||
|
||||
|
||||
int send_check(boost::mpi::communicator &world, int curr_node){
|
||||
int send_to = curr_node + 1;
|
||||
if (curr_node == world.size()-1)
|
||||
if (curr_node >= world.size()-1)
|
||||
send_to = 0;
|
||||
|
||||
return send_to;
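Editor's note: send_check now wraps the destination back to rank 0 whenever the computed target would run past the last rank (>= instead of ==), which is what lets task_one and task_three hand work to the next node over without stepping outside the communicator. An equivalent editorial restatement and a usage sketch:

#include <boost/mpi/communicator.hpp>

// For curr_node already inside [0, world.size()-1] this matches
// (curr_node + 1) % world.size(); larger inputs are clamped to rank 0.
inline int next_rank(const boost::mpi::communicator &world, int curr_node) {
    return (curr_node >= world.size() - 1) ? 0 : curr_node + 1;
}

// Usage, mirroring the isend calls in task_one:
//   int dest = send_check(world, world.rank() + alloc - 1);
//   world.isend(dest, 30, appl_exploits);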
|
||||
|
||||
@@ -1,14 +1,27 @@
|
||||
#ifndef TASKS_H
|
||||
#define TASKS_H
|
||||
|
||||
#include <boost/mpi/communicator.hpp>
|
||||
#include <boost/mpi/collectives.hpp>
|
||||
|
||||
void task_zero(AGGenInstance &instance, std::deque<NetworkState> &localFrontier, double mem_threshold);
|
||||
|
||||
void task_one(AGGenInstance &instance, NetworkState &current_state,\
|
||||
std::vector<Exploit> &exploit_list, std::unordered_map<size_t, PermSet<size_t>> &od_map,\
|
||||
int alloc, int two_alloc, int reduc_factor, int num_tasks, boost::mpi::communicator &world);
|
||||
int alloc, int two_alloc, int reduc_factor, int num_tasks, boost::mpi::communicator &world,\
|
||||
boost::mpi::communicator &tcomm);
|
||||
|
||||
void task_two(AGGenInstance &instance);
|
||||
void task_two(AGGenInstance &instance, int alloc, int two_alloc, boost::mpi::communicator &world,\
|
||||
std::deque<NetworkState> &localFrontier, double mem_threshold, boost::mpi::communicator &ttwo_comm,\
|
||||
std::vector<std::string> ex_groups, std::unordered_map<size_t, int> &hash_map);
|
||||
|
||||
void task_three(AGGenInstance &instance, NetworkState &new_state, std::deque<NetworkState> &localFrontier,\
|
||||
double mem_threshold, boost::mpi::communicator &world, int two_alloc, NetworkState &current_state,\
|
||||
Exploit &exploit, AssetGroup &assetGroup, std::unordered_map<size_t, int> &hash_map);
|
||||
|
||||
void task_four(NetworkState &new_state);
|
||||
|
||||
int send_check(boost::mpi::communicator &world, int curr_node);
|
||||
|
||||
|
||||
#endif //TASKS_H
|
||||
|
||||
55
src/mpi/tuple.h
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* tuple.h
|
||||
*
|
||||
* Created on: Jan 29, 2013
|
||||
* Author: vincent
|
||||
*/
|
||||
|
||||
#ifndef ATLAS_TUPLE_H_
|
||||
#define ATLAS_TUPLE_H_
|
||||
|
||||
#include <tuple>
|
||||
#include "type_traits.h"
|
||||
|
||||
namespace atlas {
|
||||
namespace {
|
||||
|
||||
template<size_t idx, typename Archive, typename ... Elements>
|
||||
void aux_serialize(Archive& ar, std::tuple<Elements...>& t, single_parameter_pack_tag) {
|
||||
ar & std::get<idx>(t);
|
||||
}
|
||||
|
||||
template<size_t idx, typename Archive, typename ... Elements>
|
||||
void aux_serialize(Archive& ar, std::tuple<Elements...>& t, not_single_parameter_pack_tag) {
|
||||
ar & std::get<idx>(t);
|
||||
|
||||
aux_serialize<idx + 1>(ar, t, atlas::is_last_parameter<idx, Elements...>());
|
||||
}
|
||||
|
||||
template<typename Archive, typename ... Elements>
|
||||
void serialize(Archive& ar, std::tuple<Elements...>& t, last_parameter_tag) {
|
||||
ar & std::get<0>(t);
|
||||
}
|
||||
|
||||
template<typename Archive, typename ... Elements>
|
||||
void serialize(Archive& ar, std::tuple<Elements...>& t, not_last_parameter_tag) {
|
||||
aux_serialize<0>(ar, t, std::false_type());
|
||||
}
|
||||
}
|
||||
|
||||
} // atlas
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
|
||||
template<typename Archive, typename ... Elements>
|
||||
Archive& serialize(Archive& ar, std::tuple<Elements...>& t, const unsigned int version) {
|
||||
atlas::serialize(ar, t, atlas::is_single_parameter_pack<Elements...>());
|
||||
|
||||
return ar;
|
||||
}
|
||||
|
||||
} // serialization
|
||||
} // boost
|
||||
|
||||
#endif /* TUPLE_H_ */
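Editor's note: tuple.h (with type_traits.h below) teaches Boost.Serialization to walk a std::tuple element by element, which is what allows Boost.MPI to ship the std::tuple<Exploit, AssetGroup> pairs built in task_one. A hedged usage sketch, assuming this header (or the serialize_tuple.h variant included from tasks.cpp) is on the include path:

#include <fstream>
#include <string>
#include <tuple>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/string.hpp>
#include "tuple.h"   // the adapter defined above

int main() {
    const std::tuple<int, std::string> original{7, "local"};
    { std::ofstream ofs("t.txt"); boost::archive::text_oarchive oa(ofs); oa << original; }

    std::tuple<int, std::string> restored;
    { std::ifstream ifs("t.txt"); boost::archive::text_iarchive ia(ifs); ia >> restored; }
    return 0;   // restored now holds {7, "local"}, each element via its own serializer
}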
|
||||
50
src/mpi/type_traits.h
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
* type_traits.h
|
||||
*
|
||||
* Created on: Apr 1, 2013
|
||||
* Author: vincent
|
||||
*/
|
||||
|
||||
#ifndef ATLAS_TYPE_TRAITS_H_
|
||||
#define ATLAS_TYPE_TRAITS_H_
|
||||
|
||||
//#include <type_traits>
|
||||
|
||||
namespace atlas {
|
||||
|
||||
typedef std::true_type single_parameter_pack_tag;
|
||||
typedef std::false_type not_single_parameter_pack_tag;
|
||||
typedef std::true_type last_parameter_tag;
|
||||
typedef std::false_type not_last_parameter_tag;
|
||||
|
||||
namespace {
|
||||
|
||||
template<typename ... Elements>
|
||||
struct __is_single_parameter_pack_helper {
|
||||
typedef typename std::conditional<1 == sizeof...(Elements), std::true_type, std::false_type>::type type;
|
||||
};
|
||||
|
||||
template<size_t idx, typename ... Elements>
|
||||
struct __is_last_parameter_helper {
|
||||
typedef typename std::conditional<idx + 1 == sizeof...(Elements) - 1, std::true_type, std::false_type>::type type;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
template<typename ... Elements>
|
||||
struct is_single_parameter_pack :
|
||||
public std::integral_constant<bool, __is_single_parameter_pack_helper<Elements...>::type::value>
|
||||
{};
|
||||
|
||||
template<size_t idx, typename ... Elements>
|
||||
struct is_last_parameter :
|
||||
public std::integral_constant<bool, __is_last_parameter_helper<idx, Elements...>::type::value>
|
||||
{};
|
||||
|
||||
// template<typename F, typename ...Args>
|
||||
// struct is_void_call : public std::is_void<std::result_of<F(Args...)>::type>::type {
|
||||
// };
|
||||
|
||||
} // atlas
|
||||
|
||||
#endif /* TYPE_TRAITS_H_ */
|
||||