diff --git a/.github/workflows/test-and-publish.yml b/.github/workflows/test-and-publish.yml index eacf6e2f..a07a7c47 100644 --- a/.github/workflows/test-and-publish.yml +++ b/.github/workflows/test-and-publish.yml @@ -5,6 +5,8 @@ on: branches: [ "dev", "main" ] pull_request: branches: [ "dev", "main" ] + release: + types: [published] workflow_dispatch: permissions: @@ -74,24 +76,34 @@ jobs: - name: Build the Docker image # The metadata inside the image will include the final git commit SHA and the time of the final commit. - # The tag applied to the image will be like spyderisk/system-modeller:- + # If the commit is a tag then the image is also tagged with spyderisk/system-modeller: and the image will include the release number. + # Otherwise, the tag applied to the image will be like spyderisk/system-modeller:- # e.g. spyderisk/system-modeller:dev-20230405T1012 - # Where the timestamp is the time of the final commit in the build. - # In addition, the image is tagged with spyderisk/system-modeller:-latest + # (where the timestamp is the time of the final commit in the build) + # and the image will also be tagged with spyderisk/system-modeller:-latest run: | TAG_ROOT=spyderisk/system-modeller TIMESTAMP=$(git show -s --format=%cI ${GITHUB_SHA}) SHORT_TIME=$(echo ${TIMESTAMP} | sed 's/[-:]//g') REF_END=$(echo ${GITHUB_REF} | sed 's/.*\///') - TAG_DATE=${TAG_ROOT}:${REF_END}-${SHORT_TIME:0:13} - TAG_LATEST=${TAG_ROOT}:${REF_END}-latest - echo "TAG_DATE=${TAG_DATE}" >> ${GITHUB_ENV} - echo "TAG_LATEST=${TAG_LATEST}" >> ${GITHUB_ENV} - docker build --tag ${TAG_DATE} --tag ${TAG_LATEST} --build-arg CI_COMMIT_SHA=${GITHUB_SHA} --build-arg CI_COMMIT_TIMESTAMP=${TIMESTAMP} --file Dockerfile --target ssm-production "." + if [[ ${GITHUB_REF} == refs/tags/* ]]; then + TAG_RELEASE=${TAG_ROOT}:${REF_END} + echo "TAG_RELEASE=${TAG_RELEASE}" >> ${GITHUB_ENV} + docker build --tag ${TAG_RELEASE} --build-arg CI_RELEASE=${RELEASE} --build-arg CI_COMMIT_SHA=${GITHUB_SHA} --build-arg CI_COMMIT_TIMESTAMP=${TIMESTAMP} --file Dockerfile --target ssm-production "." + else + TAG_DATE=${TAG_ROOT}:${REF_END}-${SHORT_TIME:0:13} + TAG_LATEST=${TAG_ROOT}:${REF_END}-latest + echo "TAG_DATE=${TAG_DATE}" >> ${GITHUB_ENV} + echo "TAG_LATEST=${TAG_LATEST}" >> ${GITHUB_ENV} + docker build --tag ${TAG_DATE} --tag ${TAG_LATEST} --build-arg CI_COMMIT_SHA=${GITHUB_SHA} --build-arg CI_COMMIT_TIMESTAMP=${TIMESTAMP} --file Dockerfile --target ssm-production "." + fi - name: Push Docker image to registry run: | docker login -u ${{ vars.DOCKER_HUB_USERNAME }} -p ${{ secrets.DOCKER_HUB_RW_SECRET }} - docker push ${TAG_DATE} - docker push ${TAG_LATEST} - \ No newline at end of file + if [[ ${GITHUB_REF} == refs/tags/* ]]; then + docker push ${TAG_RELEASE} + else + docker push ${TAG_DATE} + docker push ${TAG_LATEST} + fi \ No newline at end of file diff --git a/.reuse/DEP5 b/.reuse/DEP5 new file mode 100644 index 00000000..14567213 --- /dev/null +++ b/.reuse/DEP5 @@ -0,0 +1,8 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: Spyderisk +Upstream-Contact: Spyderisk Team +Source: https://github.com/Spyderisk/system-modeller/src + +Files: src/* +Copyright: 2024 The Spyderisk Licensors +License: APACHE-2.0 diff --git a/.reuse/README.md b/.reuse/README.md new file mode 100644 index 00000000..10cafd80 --- /dev/null +++ b/.reuse/README.md @@ -0,0 +1,17 @@ +# Spyderisk REUSE compliant licensing file + +This is the `.reuse` directory, according to the [REUSE specification](https://reuse.software/spec/). 
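+
+For illustration only, the kind of two-line header that REUSE expects at the top of each source file looks roughly like this (a sketch; the year and names below are placeholders rather than a statement about any specific Spyderisk file):
+
+```shell
+# SPDX-FileCopyrightText: 2024 The Spyderisk Licensors
+# SPDX-License-Identifier: Apache-2.0
+```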
+ +As of 2024, there are still files in the Spyderisk source tree which do not have copyright headers, +or have incorrect headers. In the meantime, and for the avoidance of all doubt, and for the benefit +of automated license scanning software, we have created this directory. In time we will have a correct +[SPDX header](https://spdx.dev/use/specifications/) on every relevant file, or in a .licence file next to the file as per REUSE. + +The copyright claimed by the DEP5 file in this directory is Apache2, and we are +quite sure there are no licenses which conflict with this in Spyderisk. In +other words, at worst we have temporarily and legally (but perhaps, impolitely) +relicensed the code of other people. But at least we have audited all code for +the reassurance of all users and licensors - Spyderisk is safe and compliant. + +The Spyderisk Team +2024 diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md index e07b8e52..d37d1b9f 100644 --- a/CODE-OF-CONDUCT.md +++ b/CODE-OF-CONDUCT.md @@ -1,8 +1,8 @@ # Spyderisk Code of Conduct -Version 1.0, November 2023 +Version 1.0 -The Spyderisk Project is a friendly community and welcomes contributions. This document outlines both +The Spyderisk Open Project is a friendly community and welcomes contributions. This document outlines both expected and prohibited behaviour. # Short summary @@ -121,14 +121,14 @@ accepted or tolerated. # Reporting If you believe you're experiencing unacceptable behaviour -as outlined above please contact one of the -[current authors in AUTHORS.md](./AUTHORS.md), or send a message to -[code-of-conduct@spyderisk.org](mailto:code-of-conduct@spyderisk.org). +as outlined above please contact one of the current authors +[in the CONTRIBUTORS](./CONTRIBUTORS.md) file, or send a message to +[team@spyderisk.org](mailto:team@spyderisk.org). -You should expect to receive a reply. After determining a precise description of your -situation, the team will review and determine next steps. +You should certainly get a reply. After determining a precise description of your +situation, they will review and determine next steps. -Please also report to us if you observe someone else in distress, or violations of +Please also report to us if you observe someone else in distress or violations of these guidelines. If you feel you have been unfairly accused of violating these guidelines, @@ -136,11 +136,11 @@ please follow the same reporting process.
-This document is (c) 2023 The Spyderisk Authors, under the +This document is (c) 2024 The Spyderisk Licensors, under the [Creative Commons Attribution-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-sa/4.0/) license. -*Heavily Adapted and Compressed from the quite large version 3.1 of the -[Mozilla Participation Guidelines](https://www.mozilla.org/en-US/about/governance/policies/participation/), -released under the same license. We thank Mozilla for their work. First revision and compression done by [LumoSQL](https://lumosql.org).* +*Lightly adapted from the [LumoSQL project](https://lumosql.org), which in turn heavily adapted and compressed +version 3.1 of the [Mozilla Participation Guidelines](https://www.mozilla.org/en-US/about/governance/policies/participation/), +released under the same license.* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..c2308f22 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,101 @@ +# Contributing to the Spyderisk System Modeller Project + +Welcome! + +We'd love to have your contributions to the Spyderisk System Modeller project. This document is +about our practical principles of working. + +The overall Spyderisk aim is to + +> understand the trustworthiness of socio-technical systems by establishing an international Open Community supporting the research, development, use and support of open, effective, and accessible risk assessment methods, knowledge and tools. + +and you can read more about this in the [general Spyderisk description](https://github.com/Spyderisk/), which explains +who we are and what we do. + +Please read our [Code of Conduct](../CODE-OF-CONDUCT.md) to keep our community approachable and +respectful. + +# Who can contribute? + +We need all the help we can get on the software and computing side +of Spyderisk such as Java, python and web development, system configuration, +software packaging, build/test etc. There is lots of computer science in Spyderisk. + +That said, some of the most important work is not by computer scientists. + +We also need help from: + +* modellers (creating descriptions of real-world situations in a form that Spyderisk can operate on) +* documenters (describing the current state of risk assessment knowledge, and how Spyderisk implements this) +* risk specialists (how can we decide what is important? how do we correctly calibrate our response?) +* ontologists (conceptual understanding of societal goods, risks, threats, harms, attacks, vulnerabilities etc) +* mathematicians (risk modelling methodologies, robustness of calculations etc) +* legal specialists (EU legislation on Cyber Resilience, AI, Medical Devices etc) +* graphic designers (have you seen the corners on our icons??) + +If you are any of the above, including a coder, we would love to hear from you. +Do please drop an email to [team@spyderisk.org](mailto://team@spyderisk.org) +or open a discussion issue on GitHub. + +# Getting started for software developers + +* The [system modeller README](../README.md) explains how to set up the development environment +* Once you have a working local copy of Spyderisk you can run the demonstration System models to get a feel for things +* It is likely that while doing the above you will already have noticed things that need to be fixed. Great! 
This document shows you how to make these fixes happen, or
+* Alternatively, you can find an issue from our [List of Open Issues](https://github.com/Spyderisk/system-modeller/issues) you think you would like to solve, and add a comment to say that you are working on a fix, or
+* Create a new query or bug report as described in the following section, and start working on a fix for it
+* Whatever you decide to work on, follow the "How to submit a patch" procedure below
+
+# How to open a query or bug report
+
+At this stage in our young open project, two things are true: there are many bugs to find, and very often a problem arises because the user does not understand how Spyderisk works. If it's the latter, then you have not found a bug. When you have a problem, we recommend you:
+
+* Open a [new issue in system-modeller](https://github.com/Spyderisk/system-modeller/issues/new)
+* Select the template marked "New Spyderisk query". If you are very sure it's a bug, select "New Spyderisk bug report"
+
+# How to submit a patch
+
+You are about to make us very happy. There are several cases:
+
+* Documentation fix - [create a fork and send a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork)
+* Obvious code fix - create a fork and pull request, just as for documentation
+* Any other code fix - please create a query or bug report according to the previous section. It may well be that you have code which is good to go, but in our young open project there is a lot of context that might be relevant to what you wish to do.
+
+But basically just talk to us using whatever means you are comfortable with, and we will figure it out.
+
+# Spyderisk project principles
+
+## Openness first
+
+* Our software licensing is Apache 2, and analogously for documentation
+* Our communication is collaborative and collective
+* We build our software around openly published academic theory
+
+## Version control is mandatory
+
+* Our software is under public version control.
+* Our models expressed in data dumps are also under version control
+* We create [PURL permanent URLs for software and documentation](https://purl.archive.org/domain/spyderisk) when there are important new versions. PURL is maintained by [archive.org](https://archive.org) which we hope is stable for the long term
+* We have some legacy software outside the system-modeller Git tree which cannot yet be versioned, but we are working hard on that
+
+## Transparency trumps accuracy
+
+Spyderisk needs to be both trustable and also to progress quickly. Where there
+is incomplete or inaccurate work in the Spyderisk System Modeller code then we document
+this with the string:
+
+```
+WIP: BRIEF TEXT DESCRIPTION, https://github.com/Spyderisk/system-modeller/issues/NNN
+```
+
+Where "BRIEF TEXT DESCRIPTION" should not exceed a couple of sentences, and NNN
+should be the most relevant issue.
+
+
+# Communication with the Spyderisk community
+
+* tbd
+
+# Wiki and documentation
+
+* tbd diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 00000000..55520ac7 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,56 @@
+# List of Spyderisk contributors
+
+"Contributors" refers to individual people whose work appears in Spyderisk. There are
+two kinds of individual contributors:
+
+1. Those listed in the [LICENSORS file](./LICENSORS.md), because as owners
+they have the right to license their work as open source.
Individuals are +listed there alongside organisations including Yahoo!, Microsoft and the University of Southampton. +2. Those who are listed in this file, who have between them contributed many +thousands of lines of code, but who cannot be licensors because they do not own +the code they have contributed because their employer owns the code instead. + +The Spyderisk source trees have been worked on by many people over at least +twelve years at the IT Innovation Centre at the University of Southampton. As +far as we know, anyone who has made commits to one of the Southampton Spyderisk +trees is listed here, some with email addresses that are different from the +ones in the source tree. If you feel you should be listed here but are not, +then please [do get in touch](mailto://team@spyderisk.org). + +See also [MAINTAINERS-OF-SUBSYSTEMS.md](./MAINTAINERS-OF-SUBSYSTEMS.md) which +lists the roles within Spyderisk. + +# Current contributors (who are not licensors) + +* Nic Fair +* Ken Meacham +* Panos Melas +* Stephen C. Phillips +* Samuel Senior +* Dan Shearer on GitHub (also ) +* Paul Smart +* Mike Surridge +* Steve Taylor + +# Spyderisk alumni (who are not licensors) + +We thank you all for your work and look forward to seeing you around. + +* Maxim Bashevoy +* Rayna Bozhkova +* Anna Brown +* Ajay Chakravarthy +* Gianluca Correndo +* Stefanie Cox +* Niall A J. Dickin +* Manny Dinssa +* Vahibav Gohil +* Vadim Krivcov +* Dian Kumanov +* Lee Mason +* Pete Maynard https://github.com/PMaynard +* Ardavan Shafiee https://github.com/bmbigbang +* Josh Tucker +* Toby Wilkinson +* Josh Wright +* Oliver Hayes IT Innovation diff --git a/Dockerfile b/Dockerfile index 5ab00532..e8ddb7ce 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ FROM gradle:6.3.0-jdk8 AS ssm-dev # LABELs are added to the image metadata LABEL org.opencontainers.image.vendor="IT Innovation Centre" -LABEL org.opencontainers.image.title="SPYDERISK System Modeller development image" +LABEL org.opencontainers.image.title="Spyderisk System Modeller development image" # Need gradle v6, java v8, python3 and python3-lxml (needed for jacoco2cobertura), killall (from psmisc) RUN apt-get update && apt-get -y install python3 python3-lxml psmisc @@ -41,7 +41,7 @@ FROM ssm-dev AS ssm-build ARG MAVEN_USER ARG MAVEN_PASS -LABEL org.opencontainers.image.title="SPYDERISK System Modeller build image" +LABEL org.opencontainers.image.title="Spyderisk System Modeller build image" # Copy in only the files needed for the build: it's cleanest and it means more cache hits COPY src /system-modeller/src/ @@ -64,11 +64,13 @@ FROM alpine:3.11 AS ssm-production # Build metadata ARG CI_COMMIT_SHA ARG CI_COMMIT_TIMESTAMP +ARG CI_RELEASE LABEL org.opencontainers.image.vendor="IT Innovation Centre" -LABEL org.opencontainers.image.title="SPYDERISK System Modeller" +LABEL org.opencontainers.image.title="Spyderisk System Modeller" LABEL org.opencontainers.image.revision=${CI_COMMIT_SHA} LABEL org.opencontainers.image.created=${CI_COMMIT_TIMESTAMP} +LABEL org.opencontainers.image.release=${CI_RELEASE} ENV SPRING_PROFILES_ACTIVE=production diff --git a/HISTORY.md b/HISTORY.md new file mode 100644 index 00000000..498d3e32 --- /dev/null +++ b/HISTORY.md @@ -0,0 +1,220 @@ +# The story of the Spyderisk Open Project + +Last updated April 2024 + +This document ([HISTORY.md](./HISTORY.md)) was first written quarter-way +through the 21st century, where humanity faces problems of immense complexity. 
+
+Many citizens worry they cannot escape the effects of automated systems, and
+governments and others in society are quite rightly responding to these concerns.
+Spyderisk is one response to this, a tool intended to visualise and present a summary
+of the risk in systems too complicated for any human to understand or debug.
+
+# Contents
+
+* [The journey to openness](#the-journey-to-openness)
+* [Threat modelling in cyber-physical systems](#threat-modelling-in-cyber-physical-systems)
+* [Evolving the modelling implementation](#evolving-the-modelling-implementation)
+* [Much larger systems, and notions of compliance](#much-larger-systems-and-notions-of-compliance)
+* [Trust modelling in sociotechnical systems](#trust-modelling-in-sociotechnical-systems)
+* [The role of ontologies, and Spyderisk papers](#the-role-of-ontologies-and-spyderisk-papers)
+
+# The journey to openness
+
+Our Spyderisk story starts in 2006 at the University of Southampton in England,
+where a group gathered themselves under the name
+[IT Innovation Centre](https://www.it-innovation.soton.ac.uk/) and
+successfully bid for research funding to solve problems related to complex
+computer systems. Starting with GRID computing and then leading on to security,
+privacy, health data and bioinformatics, the Centre kept investing in software
+tools for assessing the risk in complex systems. And while software always gets
+out of date, even more importantly the Centre developed a database of knowledge
+about risk assessment.
+
+It became clear that risk assessment is being taken seriously throughout
+society. From legislation
+[regulating the use of AI in our daily lives](https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence),
+to the need to [balance conflicting requirements for climate change](https://doi.org/10.1038/s44168-023-00078-x),
+there is a strong desire to understand what sorts of risks are involved. And while it seems
+counter-intuitive that we can make judgements about risks even without
+understanding how these complex systems work, that does seem to be true.
+
+The IT Innovation Centre realised that it had 15 years of study that could be
+used to seed a new community of automated risk assessment.
+
+In 2023 the Spyderisk open project was founded, with the IT Innovation team
+aiming for involvement and practical applications far beyond Southampton. All
+the software tools and databases were published to the world under open
+licences, together with [copies of all academic output](./docs/papers/README.md).
+
+Spyderisk is a collaborative effort and we [invite anyone interested in contributing](./CONTRIBUTING.md)
+to this field to join in. The rest of this document explains how we got here,
+but you might prefer to start with the [general project introduction](./README.md).
+There have been many [contributors so far](./CONTRIBUTORS.md) and we look forward to
+welcoming many more.
+
+# Threat modelling in cyber-physical systems
+
+Engineer and mathematician [Mike Surridge](https://www.southampton.ac.uk/people/5wyvsw/professor-mike-surridge#research)
+led a team developing concepts in [grid computing](https://en.wikipedia.org/wiki/Grid_computing).
+[Michael Boniface](https://www.southampton.ac.uk/wsi/about/staff/mb.page) and others implemented the
+[GRIA distributed computing system](https://web.archive.org/web/20110726132409/http:/www.gria.org/),
+and presented an early paper on
+[semantic risk management tools for adaptive security](./docs/papers/boniface2010.pdf). GRIA was open source,
+and it demonstrated that assessing risk in a dynamic computer system is a difficult problem.
+
+In 2009 Ajay Chakravarthy implemented security models in the
+[Semantic Web Rule Language (SWRL)](https://protege.stanford.edu/conference/2009/slides/SWRL2009ProtegeConference.pdf)
+to create the first version of the Spyderisk System Modeller reasoner. The introduction
+of semantic web approaches brought with it a need for an [ontology](https://en.wikipedia.org/wiki/Ontology_(information_science)),
+which is a formal description of knowledge. An ontology defines objects and the relationships between objects.
+This was the beginning of the Spyderisk knowledgebase.
+
+So far, all of the co-operating systems involved were other computer systems,
+and if something went wrong in the end it was always possible to restart the
+compute workloads. Then we started to consider systems with direct impacts on
+the real world, where restarting is not an option. This is in the area of
+[cyber-physical systems](https://en.wikipedia.org/wiki/Cyber-physical_system).
+
+> Mike and the team observed that the level of risk between multiple
+  cyber-physical systems increases with their connectivity - even if
+  the intention of the connectivity is to share information to reduce risk.
+
+This was really not what was expected with increased information sharing.
+
+This work led to a 2010 EU project to design a system to examine data flows between
+[organisations in the air transport industry](./docs/papers/surridge2010.pdf), which is a
+complex always-on network where restarts are not possible. The industry had been increasing
+connectivity between air traffic control and the airport operators, which we showed
+caused the opposite of what was intended - risk increased, and the chances of smooth operations
+reduced.
+
+# Evolving the modelling implementation
+
+For the first time, the group developed interactive software to visualise
+relationships and risk. The implementation used stochastic methods derived from
+[Markov queuing theory](https://en.wikipedia.org/wiki/Markovian_arrival_process) to explore
+how the objects and relationships defined in the knowledgebase could express risk over the whole
+system, depending on the likelihood of various chains of events occurring.
+We used our demonstration system to present a [followup 2011 paper](./docs/papers/surridge2011.pdf)
+where we compared the stochastic method to
+[discrete simulation](https://en.wikipedia.org/wiki/Discrete-event_simulation) and concluded that our
+modelling approach gave results that could be very useful in the real world.
+
+Everything until this point was in the field of [Secure Systems Modelling](./docs/papers/surridge2011-1.pdf),
+that is, secure IT systems which impact on the physical world such as covered in the
+[ISO27000 series](https://en.wikipedia.org/wiki/ISO/IEC_27000-series). We learned how
+to effectively model dependencies in the tree of risks, observing that just one
+change in risk (for example, a problem being fixed) can result in a very
+different and sometimes surprising change in the overall risk assessment after
+recalculation.
We concluded that ISO27000-type approaches are unsuitable +for unpredictable and unstable systems of systems such as are found in the commercial +airline industry. + +Our knowledgebase was starting to become a useful applied tool, as we said in 2011: + +> Improvements to the core ontology (below) allow us to + model physical and electronic attacks on airport connectivity and spaces. + These include the ability of actors (including intruders) to move around the + airport, the use of private networks to support communications with and in the + airport, and the potential for physical or electronic attacks on communication + assets as well as services that use them. + +![Core ontology for airport modelling](./docs/images/serscis-base-ontology.png "Core ontology for airport modelling") + +Over time, as we refined our software tools, we began to move towards +cause-and-effect modelling as opposed to probabilistic modelling. Spyderisk +today uses fuzzy logic and inference to determine how likely it is that +different threats and their consequences may occur, based on the different +fuzzy inference rules that describe what can happen in a given system. These +fuzzy rules are based in Bayesian probability and allow Spyderisk to perform +efficient risk assessments based on the approximate fuzzy information that is +typically available in cyber-physical systems. + +# Much larger systems, and notions of compliance + +Mobile telephone networks are highly regulated just like the airline industry, +because they are safety-critical. Similarly with anything to do with handling health +data, and as we delivered risk assessment solutions for these industries we began to +consider compliance. + +> A key insight was viewing compliance requirements as threats, + which then meant our one modelling approach could capture the + security and safety aspects of an entire system of systems. + +The ICT systems used in mobile networks are exceedingly complicated, as we +discovered in a [2018 project modelling threats in 5G networks](./docs/papers/surridge2018). +There can be hundreds of different computer systems +involved from unrelated companies, and many stakeholders - not least the +individual end users. A threat to one stakeholder requires responses from +other stakeholders because no one stakeholder has all power or rights. We +cannot know all the details of the individual industries, which is why we made +Spyderisk modelling more accessible. We need more experts in specific domains +to assist with developing new models, as we experienced again when delivering +a [2019 project for sharing health data](./docs/papers/surridge2019), and another as part +of the [RESTASSURED Secure Cloud Data Processing](https://cordis.europa.eu/project/id/731678) project. + +These projects in 2019 led to the first modern versions of the system modeller +(or ssm, now called simply "Spyderisk".) With our approach of treating +non-compliance as a threat, we were able to model privacy in terms of the EU +GDPR for the first time, helping healthcare providers assess their data +handling processes. + +Our deliverables for the EU [FogProtect project](https://fogprotect.eu/) in +[2020](./docs/papers/surridge2020.pdf) and [2022](./docs/papers/taylor2022.pdf) +addressed the security and risk management of systems spanning the cloud and +[fog](https://en.wikipedia.org/wiki/Fog_computing), where there could be many +fog nodes to handle. 
For this the system modeller was enhanced to efficiently
+handle large numbers of assets through treating them as distributions of
+behaviours and considering the important cases, which were: the average
+behaviour of the population, the behaviour of the best member, and the
+behaviour of the worst member.
+
+# Trust modelling in sociotechnical systems
+
+So far we had mostly dealt with aspects of security in cyberphysical systems.
+The problem with a cyber-physical approach is that it tends to be less
+concerned with the complexities of the humans who interact (or choose not to
+interact) with the systems. Over time we broadened our approach, as concerns
+increased in society generally. This involves more nuanced and subtle questions
+related to humans and how they feel, including:
+
+* trust - do groups of people feel this system is one they want to use and recommend?
+* privacy - this is a complex intersection of facts and law, and intimate feelings of violation
+* safety - how does a system meet safety requirements, and does it also make people feel safe?
+
+In 2019 we explored this more [human view of health data risks](./docs/papers/pickering2019) from a psychological perspective.
+
+At a policy level (that is, the things that governments and large organisations
+in society are concerned with) we are seeing laws that talk about "Trustworthy
+systems" and "Responsible AI". Spyderisk is interested in how to assess
+concepts of trust and responsibility, because not only does nobody fully
+understand the systems these terms are applied to, but they also involve
+aspects of psychology and sociology. For example, a system may be objectively
+safe in a technical sense, but people could still have good reasons not to
+trust it, which may well cause other problems. All of this can be associated
+with harms, and thus Spyderisk can potentially assess the likelihood of these harms occurring.
+
+We want Spyderisk to become increasingly good at modelling these systems,
+visually present the harms to users, and demonstrate what happens as the user
+changes the model for different scenarios.
+
+# The role of ontologies, and Spyderisk papers
+
+Since 2009 Spyderisk has based its reasoning about risk on an underlying ontology.
+Technical details of the ontological approach are in the
+[Spyderisk ontologies documentation](./docs/ontologies.md), but in terms of Spyderisk
+history this is also important. The Spyderisk Open Project aims not only to produce a
+tool widely used for risk assessment, but to collect the knowledge of experts engaged in
+risk assessment generally. These experts may never use any Spyderisk software, but we are
+all working in the same general ontological areas, and we all need a common vocabulary.
+
+Spyderisk combines a domain ontology with an application-level ontology, but
+does not have an upper-level ontology. The Spyderisk ontology continues to be
+developed, alongside research specifically aimed at improving ontologies in various
+Spyderisk applications and domains.
Since 2023 the team has collaborated on
+three papers related to ontological challenges in Spyderisk:
+
+* [Secure Ontologies for Internet of Things Systems (SOfIoTS)](./docs/papers/smart2023.pdf)
+* [Biomedical Burden Ontology: Ontology Documentation](./docs/papers/smart2024.pdf)
+* [The Ethics of the Extended Mind: Mental Privacy, Manipulation and Agency](./docs/papers/smart2024-2.pdf)
+ diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 00000000..e7078e04 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,223 @@
+# Spyderisk System Modeller installation
+
+## Pre-requisites
+
+You will need `git`, `git-lfs`, `docker` and `docker-compose`. See below for
+more detail.
+
+## Quick Start
+
+The following instructions assume that you have the pre-requisites installed
+and working, and have cloned the repository.
+
+N.B. Prior to running the following commands, you should also ensure that you
+have one or more knowledgebases (domain models) available for installation.
+These are available as zip file "bundles", containing the domain model itself,
+along with the icons and mapping file needed for generating a UI palette of
+visual assets.
+
+An example knowledgebase is available at: Here, you will
+find the latest .zip bundle, at the bottom of the "Assets" list. This file
+should be downloaded and copied into the system-modeller/knowledgebases folder.
+Once Spyderisk has been started up (see instructions below), these zip files
+will be automatically extracted and loaded into Spyderisk.
+
+Of course, you may choose not to install a default knowledgebase; however, when
+the Spyderisk GUI first loads in your browser, you will be directed to load in
+a new knowledgebase manually.
+
+1. `$ cd system-modeller`
+2. `$ docker-compose up -d`
+3. `$ docker-compose exec ssm bash`
+4. `$ ./gradlew assemble bootTest`
+5. Go to in your browser.
+6. Log in using `testuser` or `testadmin` with password `password`.
+
+N.B. The links in the user interface for the attack graph image do not work in the development environment. Links to Keycloak account management functions and documentation do not work via port 8081 but do work via port 8089.
+
+Please also note that the default setup is to recreate all databases on initial
+start-up. In order to persist any installed knowledgebases and created system
+models, you should ensure that `RESET_ON_START=false` in your `.env` file, prior to re-running `./gradlew assemble
+bootTest`.
+
+## Installing Docker
+
+Please see the [Docker website](https://www.docker.com/) for details.
+
+### Windows
+
+We assume WSL2 (Windows Subsystem for Linux v2) is installed. If you do not install WSL2,
+you will need to run Linux within a virtual machine, or switch to running Linux instead of Windows.
+
+Download and install the closed-source application "Docker Desktop" for Windows.
+
+Docker Desktop integrates with WSL2 (Windows Subsystem for Linux v2). WSL2
+provides a Linux environment deeply integrated into Windows. As many
+development tools are designed for Linux, using WSL2 can make things easier.
+Docker provide [instructions for the Docker Desktop WSL2
+backend](https://docs.docker.com/docker-for-windows/wsl/) which should be
+followed.
+
+As part of the WSL2 installation, [you choose and install a Linux distribution
+to
+use](https://docs.microsoft.com/en-us/windows/wsl/install-win10#step-6---install-your-linux-distribution-of-choice).
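+
+If you want to check which distributions are registered and whether they are running under WSL2 rather than WSL1, the standard `wsl` command can report and convert them. This is a hedged sketch: `Ubuntu` below is only an example distribution name, so substitute whichever distribution you installed.
+
+```shell
+# List installed distributions and the WSL version each one is using
+wsl --list --verbose
+
+# Convert a distribution to WSL2 if it is still on version 1
+wsl --set-version Ubuntu 2
+
+# Make it the default distribution started by the `wsl` command
+wsl --set-default Ubuntu
+```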
+
+Once WSL2 and Ubuntu are installed, open a terminal window of some sort and
+type `wsl` to switch to your default WSL2 Linux distro. You will need to copy
+your private SSH key into the `.ssh` folder in the distro. You can access your
+`C:` drive with the path `/mnt/c`:
+
+```shell
+wsl
+cd
+mkdir .ssh
+chmod 700 .ssh
+cp /mnt/c/path/to/your/key/id_rsa .ssh
+chmod 600 .ssh/id_rsa
+```
+
+You may also want to limit the host resources that Docker Desktop is permitted
+to use. This can be done with a `.wslconfig` file in your `%UserProfile%`
+folder, e.g.:
+
+```
+[wsl2]
+memory=10GB
+processors=8
+```
+
+At this point you have a functional Linux system. Please skip to the Linux
+sub-section for the rest of the instructions.
+
+### Linux
+
+Many Linux distributions already have Docker installed. The following command
+has been tested in `apt` based systems such as Ubuntu. To install Docker:
+
+```shell
+sudo apt-get install docker docker-compose
+```
+
+## Docker Concepts
+
+Docker manages individual containers, and its configuration files are by default
+called "Dockerfile". It is possible to manually orchestrate (and connect)
+docker containers but it is easier to manage multiple containers using
+"docker-compose". Docker Compose files by default are called
+"docker-compose.yml" and they refer to docker images (either to be pulled from
+a docker registry) or to local Dockerfiles.
+
+### Images
+
+Images are the equivalent of VM images. They are built in layers on top of
+standard distributions (e.g. from Docker hub). Images have names and tags (e.g.
+"postgres:9.6.19" or "mongo:latest"). Be careful with the "latest" tag as use
+of it does not guarantee that you are actually getting the latest version - it
+is just a string like any other. By convention it will be the latest but
+perhaps not if it is retrieved from your local cache.
+
+Commands:
+
+* List all local images with `docker image ls`
+
+### Containers
+
+Containers are running instances of images. They can be paused, unpaused,
+stopped and started once created or just destroyed and recreated. The state of
+a container changes as it runs with processes writing to disk and updating
+memory. Writing to disk creates a new layer in the image by default or changes
+a persistent "volume" if it is defined (see below). If a container is paused
+then all the processes are paused (with `SIGSTOP`) and can be resumed but if a
+container is stopped (`SIGTERM` then `SIGKILL`) then the memory state is lost.
+
+Commands:
+
+* List containers that relate to the local `docker-compose.yml` file with
+  `docker-compose ps`
+* List running containers with `docker container ls` or just `docker ps`
+* List all containers with `docker container ls -a` or just `docker ps -a`
+* Remove the containers that relate to the local `docker-compose.yml` file with
+  `docker-compose rm`
+* Remove a container with `docker container rm `
+* Remove containers that are not running with `docker container prune` (be
+  careful!)
+
+### Volumes
+
+There are two main sorts of volume:
+
+1. where a folder from the host is mounted in the container (a "bind mount");
+2. where a volume is created in a private space used by Docker. There are
+   "named volumes" and "anonymous volumes" of this type.
+
+We use (1) to mount the source code from the host into the container so that
+editing can be done on the host and building in the container. Volume type (2)
+is used for folders in the container where the contents changes such as the
+`build` folder and gradle cache.
All volumes are persisted separately to the +container. Anonymous volumes are given enormous random identifiers so they are +hard to identify. Named volumes can be shared between two different executing +containers and can be reused between container instantiations. Anonymous +volumes are not reused, they are left as orphans when a container is destroyed. + +Named volumes for the databases are defined in `docker-compose.yml`. They are +called `jena`, `mongo-db` and `mongo-configdb`. When docker-compose creates the +volumes, it prefixes those names with the "project name" which by default is +the name of the folder containing the `docker-compose.yml` file. Therefore the +volume names are likely to be e.g. `system-modeller_jena`. + +**N.B. if you have two system-modeller folders with the same name then they +will end up using the same named volumes: this is almost certainly not what you +want.** + +Named volumes for build artifacts are defined in the +`docker-compose.override.yml` file. They cover the `gradle`, `npm` and `maven` +artifact folders. + +Commands: + +* List all volumes with `docker volume ls` +* Remove a volume with `docker volume rm ` +* Remove all volumes that are not used by a container (running or not) with + `docker volume prune` (it is sensible to do this periodically to save disk +space) + +### Networks + +Networks provide communication between containers. + +### Container registry + +Images (not containers actually) are stored in "registries". There is a cache +for images on your local machine and there are remote registries. We use Docker +Hub for standard images. + +## Use of Docker + +Docker: manages single containers. We have a multi-stage `Dockerfile` which +defines three different containers for the SSM: + +* The `ssm-dev` container holds the development environment (including Java, + Gradle, Maven, etc). It is the one used by developers who will be building +the SSM themselves with `gradle` commands. The basic strategy is to keep the +source code files on the host and mount them into the SSM container. In this +way, the build tools are isolated from the host but the source code can still +be edited easily in any editor on the host. +* The `ssm-build` container executes a clean build of the SSM and is used by + the CI system. +* The `ssm-production` container is created by the CI and is a light-weight + container with just the software necessary for executing the SSM. It can be +used for demos and is intended for "production" deployment. The "production" +image is built on any branch that the CI builds (e.g. master and dev). The +"production" image is used in the separate "system-modeller-deployment" +project. + +Docker-compose: orchestrates multiple containers. We use several files: + +* The `docker-compose.yml` defines the base orchestration of the SSM + development container with supporting services (e.g. a MongoDB container). +* The `docker-compose.override.yml` file is (by default) overlayed on top of + `docker-compose.yml` and adds in the development environment. +* The `docker-compose.test.yml` file is used (primarily by the CI pipeline) to + execute the integration tests. + diff --git a/LICENSE.md b/LICENSE.md index 4fa9ffcd..acb0277d 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -2,55 +2,66 @@ Spyderisk is licensed under the Apache 2.0 license. -The [README in the licenses directory](./LICENSES/README.md) explains how to apply -license headers to files in Spyderisk. 
+The [README in the licenses directory](./LICENSES/README.md) explains *how* developers should apply
+license headers to files in the Spyderisk System Modeller. This document is about the *why and what* of licensing. The site [TL;DR Legal](https://www.tldrlegal.com/license/apache-license-2-0-apache-2-0) summarises the Apache license as: > You can do what you like with the software, as long as you include the required notices. > This permissive license contains a patent license from the contributors of the code.
+For most people most of the time, this is all you need to know - please use and enjoy Spyderisk!
+ # Spyderisk Open Project Copyright and Licenses -Regardless of who owns contributions, Spyderisk source code is entirely -licensed under either the Apache2 license or (in some cases) licenses which are -compatible with Apache2. A complete list of licenses is in the README file in -the directory [```LICENSES/```](./LICENSES/README.md), which also contains the text of these licenses and -discussion about the Spyderisk policies and practices on incorporating open source -code developed outside Spyderisk. - -As of 2023, the copyright of the code in all Spyderisk source trees -is owned by either the individual authors, or, quite often, the University of -Southampton ("Soton"). The IT Innovation Centre of the University of Southampton -created nearly all Spyderisk code before it was open sourced in 2023. For any -new contributions - and we hope there will be many - we recommend the original author retain -copyright ownership as an individual. If you work for an organisation and are -unsure of who owns your output while at work, see your employment contract and -ask your management. Many files simply have "Copyright the Spyderisk Authors" at the -top as well as the name of the original author. This is a reference to the file -[```AUTHORS.md```](./AUTHORS.md). - -Spyderisk documentation and configuration files are generally under a Creative Commons -license, again explained in [```LICENSES/```](./LICENSES/README.md). It is not necessary -for every file to have a copyright notice, but in Spyderisk we do insist that all source code -files do.
+Spyderisk source code is available entirely under [Open
+Source](https://opensource.org/osd) licenses, either the [Apache2
+license](./LICENSES/APACHE-2.0.txt) as described above, or
+for documentation, the [Creative Commons CC-by-SA](./LICENSES/CREATIVE-COMMONS-BY-SA-4.0.txt).
+
+We use two standards to maintain copyright and licensing of all artefacts in the Spyderisk system modeller project:
+
+* The [REUSE](https://reuse.software/spec/) high-level system of files and directories regarding licensing
+* The [SPDX software component descriptors](https://spdx.dev/), which together form a Software Bill of Materials (SBOM) system
+
+Both of these standards can be read by humans and machines, so Spyderisk is
+compatible with various automated due diligence systems.
+
+# Licensor or Contributor?
+
+The distinction matters legally, but in the day-to-day we just want to
+acknowledge the work done by many people over the years. Only someone who owns
+code has the right to license that code. Many substantial Spyderisk
+contributors commit their work but do not own their contributions because of
+their employment contract, and therefore they cannot be a licensor.
+
+Many Spyderisk source files simply state "Copyright the Spyderisk licensors" at
+the top in the manner specified by the SPDX standard, where the
+owners/licensors are listed in the [LICENSORS file](./LICENSORS.md). This is
+usually followed by the statement "Original by A. Person", where "A. Person" is
+listed in the [CONTRIBUTORS file](./CONTRIBUTORS.md), or occasionally in
+LICENSORS if they are in fact also owners.
+
+# Background
+
+Without repeating the detailed [HISTORY file](./HISTORY.md), Spyderisk licensing
+is explained by its history:
+
+* In 2023, when all source code was open sourced, the University of Southampton (Soton) was the main copyright licensor
+* There were many individual code contributors employed by Soton who were and remain Spyderisk authors, but all of their work in Spyderisk while being Soton employees is owned by Soton. These authors (called "contributors" to avoid confusion) are therefore not copyright licensors
+* A small proportion of the Spyderisk code has been incorporated from other open source projects, and remains copyrighted by its respective owners/licensors
+* Any future contributors to Spyderisk who are not Soton employees will own their contributions, and so they will be both authors and owners/licensors
+* Some future contributors may be in a similar situation to Soton employees, and their employer will own all their Spyderisk contributions. We would respectfully request that contributors check with their employer to see if they have the right to contribute individually, because we think that is better for the project overall.
 # No CLA -Spyderisk does *not* and will not have a Contributor License Agreement (CLA), -for reasons similar to [Red Hat](https://opensource.com/article/19/2/cla-problems),
+As a matter of policy, Spyderisk does not and will not have a Contributor License Agreement (CLA),
+for reasons similar to [Red Hat](https://opensource.com/article/19/2/cla-problems),
 the [Software Freedom Conservancy](https://sfconservancy.org/blog/2014/jun/09/do-not-need-cla/) and -other leading open source voices. - -We adhere to the "inbound = outbound" open source principle, which means: -* each code contributor (ie inbound) has the same rights as every other code contributor. -CLAs often grant additional rights to one particular contributor, which in -the case of Spyderisk would be our generous founding donor, the University of -Southampton. We have chosen not to do this. -* each code user (ie outbound) has exactly the same rights to use Spyderisk source code -as every code contributor. - -Spyderisk uses the excellent Apache 2 license from -[apache.org](https://apache.org), but apache.org itself is an organisation serving -large companies, and we do not use their CLAs. We are not affiliated with -apache.org in any way.
+other leading open source voices.
+
+Spyderisk adheres to the "inbound = outbound" principle, where Licensors get
+exactly the same rights as anyone else in the world. While we use the excellent
+Apache 2 license from [apache.org](https://apache.org), Spyderisk is not
+affiliated with apache.org, and we do not use the Apache CLAs or other tools
+which do not share rights equally with all. diff --git a/LICENSES/Apache-2.0 b/LICENSES/APACHE-2.0.txt similarity index 100% rename from LICENSES/Apache-2.0 rename to LICENSES/APACHE-2.0.txt diff --git a/LICENSES/BSD-3-CLAUSE.txt b/LICENSES/BSD-3-CLAUSE.txt new file mode 100644 index 00000000..b91bbd89 --- /dev/null +++ b/LICENSES/BSD-3-CLAUSE.txt @@ -0,0 +1,9 @@
+Copyright (c) <year> <owner>.
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/LICENSES/CC-BY-SA-4.0 b/LICENSES/CREATIVE-COMMONS-BY-SA-4.0.txt similarity index 100% rename from LICENSES/CC-BY-SA-4.0 rename to LICENSES/CREATIVE-COMMONS-BY-SA-4.0.txt diff --git a/LICENSES/ISC.txt b/LICENSES/ISC.txt new file mode 100644 index 00000000..5ee61d7e --- /dev/null +++ b/LICENSES/ISC.txt @@ -0,0 +1,5 @@ +Copyright + +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED “AS IS” AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/LICENSES/MIT b/LICENSES/MIT.txt similarity index 88% rename from LICENSES/MIT rename to LICENSES/MIT.txt index 77738a2d..469a7baf 100644 --- a/LICENSES/MIT +++ b/LICENSES/MIT.txt @@ -1,7 +1,8 @@ (the following text is the MIT Expat license, as used in some front-end library components of Spyderisk. For more information about the variations in the MIT license see https://en.wikipedia.org/wiki/MIT_License#Variations . In summary, -this is the most-used license anywhere, and its terms are well-understood.) +the MIT family of almost-identical licenses is the most-used anywhere, and its +terms are well-understood. Most people just call all of these licenses "MIT".) 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/LICENSES/README.md b/LICENSES/README.md index 4cefcd90..3d56155f 100644 --- a/LICENSES/README.md +++ b/LICENSES/README.md @@ -1,24 +1,44 @@ -# Licenses in Spyderisk and how to apply them
+# Licenses in the Spyderisk System Modeller and how to apply them
 -We apply licenses to all intellectual property in the Spyderisk project, unless -it already comes from some other authors and already has a license.
+This is both a policy document and a practical how-to. The technical details of
+licensing can be complicated, but Spyderisk licensing is easy if you follow
+these basic rules. If you have any questions do please ask
+[team@spyderisk.org](mailto://team@spyderisk.org).
 -There are four types of intellectual property in Spyderisk:
+Licenses apply to all intellectual property in the Spyderisk project.
+We apply licenses in the ways specified in
+the [REUSE](https://reuse.software/spec/) specification of files and directories. Within
+individual source files, licenses are declared according to the
+[SPDX software component Bill of Materials](https://spdx.dev/) specification. For code
+that we create, we choose the license. For third-party code, we use whatever license
+was chosen for that code (assuming it is compatible with Spyderisk at all - otherwise
+we couldn't use that third-party code!)
+
+There are four types of intellectual property created by Spyderisk project members
+specifically for including in Spyderisk:
 * software source code * documentation, including images and other media -* configuration files and examples, which we regard the same as documentation -* third-party contributions for each of the above three types
+* academic papers and reports
+* configuration files and examples, which we choose to regard the same as documentation
+
+A fifth type of intellectual property is that created by external third-party
+contributors who have probably never even heard of Spyderisk, of any of the
+above four types. We already use a lot of such code to avoid re-inventing existing
+functionality, all of which is compatible with our licensing policy and some of which
+is not under the Apache license.
 We currently use these licenses in Spyderisk: -* *Apache 2* nearly all code. -* *Creative Commons By-SA 4.0* Eventually all documentation will be copyright CC By SA. -* *MIT* A few third party front-end elements including the Bootstrap and JQuery libraries)
+* *[Apache 2](./APACHE-2.0.txt)* for nearly all code, including all code created specifically for Spyderisk
+* *[Creative Commons By-SA 4.0](./CREATIVE-COMMONS-BY-SA-4.0.txt)* for all new documentation, and eventually all documentation will be copyright CC By SA unless it was created by someone else
+* *[MIT](./MIT.txt)* Some third party front-end elements (including the Bootstrap and JQuery libraries)
+* *[ISC](./ISC.txt)* and *[BSD 3-Clause](./BSD-3-CLAUSE.txt)* for some other third-party code
 -We are happy to consider any useful third-party code or documentation for inclusion in Spyderisk -provided it is under a compatible license.
There is occasionally some nuance to -what "compatible license" means, as described below, but this is our general outlook.
+As you can see, Spyderisk is happy to consider any useful third-party code or
+documentation for inclusion in Spyderisk provided it is under a compatible
+license. There is occasionally some nuance to what "compatible license" means,
+as described below, but this is our general intention.
 # Apache 2.0 license - default for source code @@ -27,14 +47,12 @@ So long as the third party code has a license compatible with the [Open Source Definition](https://opensource.org/osd/) then it will not conflict with the Apache 2.0 license and we can freely use it. -https://www.apache.org/licenses/LICENSE-2.0.txt - In order to apply the Apache license to a source code file in the Spyderisk -project, insert this at the top within an appropriate comment block for the language -you are using, replacing the text in [brackets] with the correct values.
+project, insert the following comment block at the top, replacing the text in
+[square brackets] with the correct values.
 ``` -Copyright [YEAR] The Spyderisk Authors
+Copyright [YEAR] The Spyderisk Licensors
 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,20 +67,19 @@ Copyright [YEAR] The Spyderisk Authors limitations under the License. - + - + ``` # Creative Commons BY-SA - documentation and config files -We have decided not to apply copyright headers to README files such as this one, because -we do not actually have to brand every file. We do not, for example, add copyright headers -to images, we just make a statement in a file about all the images. However most text forms -of documentation do have explicit CC BY-SA license at the top. - -https://creativecommons.org/licenses/by-sa/4.0/deed.en
+We have decided not to apply copyright headers to README files such as the one you are reading, because
+the REUSE standard already brands every file, and it would just be messy and distracting.
+Similarly we do not add copyright headers to images, we just make a statement in a file
+covering all the images. However most non-Markdown forms of documentation do have explicit CC BY-SA
+license at the top.
 ``` Copyright 2023 The Spyderisk Authors - + ``` # What about third-party GPL code? @@ -83,19 +100,21 @@ We cannot use GPLv2 licensed code because it is the one major open source licens Spyderisk uses the JSPlumb library, which is dual-licensed under MIT and GPLv2, and we choose to use it under the MIT license so there is no conflict. -... and Maybe.
+... and also *Maybe*, perhaps.
 -The GPLv3 is compatible with Apache 2.0, but only in one direction. After the -two codebases are combined, the result can only be distributed under the GPL -(again, there is some nuance but this is approximately correct.) In the -theoretical case of a significant piece of third party software only being -available under the GPLv3, AGPLv3 or LGPLv3, then the whole of Spyderisk would -be distributed under the GPL terms.
+Unlike version 2, the [GPLv3](https://www.gnu.org/licenses/gpl-3.0.txt) is
+compatible with Apache 2.0, **but only in one direction**. After codebases
+under these two licenses are combined, the combined result can only be
+distributed under the GPLv3 (again, there are some additional nuances but this is
+approximately correct.)
In the theoretical case where the Spyderisk Project +decided to mix its code with a significant piece of third party software which +is only available under the GPLv3, AGPLv3 or LGPLv3, then the whole of +Spyderisk (when compiled) could only be distributed under the GPLv3 terms. That is certainly possible, but it would be a big change and not what was intended at the time Spyderisk was placed under Open Source licences by our generous founding donor, the University of Southampton. In that unexpected case, we might even consider re-licensing to GPLv3. -Until that time, we will review very carefully any proposed imports of GPL code +Until that time, we will review very carefully any proposed imports of GPLv3 code into the tree. We're not saying "no", but we will be cautious. diff --git a/LICENSORS.md b/LICENSORS.md new file mode 100644 index 00000000..f096af53 --- /dev/null +++ b/LICENSORS.md @@ -0,0 +1,105 @@ +# List of Spyderisk copyright licensors/owners + +Every individual and organisation listed here has chosen to license their work +to the world under Open Source terms and conditions. We have incorporated that +work into Spyderisk. Thank you. + +If you feel you should be listed here but are not, then please +[do get in touch](mailto://team@spyderisk.org). + +Each owner/licensor is listed with an indication of at least one of their +contributions, however, this is not intended to be authoritative or exhaustive. +Refer to the Spyderisk source tree for exact details. Only the first +contribution of an Owner is noted here, just as a handy hint. Some Owners have +dozens of contributions. + +Note: authors of many thousands of lines of Spyderisk code are not listed here, +because their employment contracts mean they are not Owners. They deserve just as +much recognition, and are listed in [CONTRIBUTORS.md](./CONTRIBUTORS.md). + +Corporate owners/licensors: + +* University of Southampton +* OpenJS Foundation (formerly JS Foundation) and other contributors, https://openjsf.org/ (JQuery and other components) +* Facebook, Inc. and its affiliates (the React framework) +* Microsoft Corporation (Typescript framework) +* The Obvious Corporation (WebAssembly tools) +* Fair Oaks Labs, Inc (IEEE 754 floating point tools) +* caniuse.com (browser functionality database) +* Made With MOXY Lda (node-cross-spawn library) +* The cheeriojs contributors (JS domain handling code) +* Joyent Inc (Chrome event tracer) +* Yahoo! 
Inc (hoist-non-react-statics) +* whitequark (ipaddr.js) + +Individual owners/licensors: + +* Eemeli Aro (yaml parser) +* Nicolò Ribaudo and other contributors (Babel) +* Roman Dvornov (discoveryjs) +* Justin Ridgewell (sourcemap) +* Rich Harris (sourcemap-codec) +* Sven Sauleau (WebAssembly tools) +* Mauro Bringolf (WebAssembly tools) +* Jonathan Ong (jshttp) +* Douglas Christopher Wilson (jshttp) +* Sindre Sorhus (JS file handling) +* Elan Shanker (JS string handling) +* Paul Miller https://paulmillr.com (JS string handling) +* Blake Embrey (JS string handling) +* Andrey Sitnik (CSS handling) +* Matt Zabriskie (promised-based http client) +* Luís Couto (Babel) +* Jon Schlinkert (path handling) +* Linus Unnebäck (buffer-from code) +* TJ Holowaychuk (string conversion) +* Jed Watson (string conversion) +* Jordan Harband (JS call binding) +* JakubPawlowicz.com (CSS cleaner) +* Luke Edwards https://lukeed.com (Klona and Escalade) +* Heather Arthur (Color convert) +* Dmitry Ivanov (Color name) +* Jorge Bucaran https://jorgebucaran.com (Colorette) +* Ben Ripkens http://bripkens.de (Connection history) +* Douglas Christopher Wilson (Content disposition and Content type code) +* Thorsten Lorenz (convert-source-map) +* Roman Shtylman (cookie handling) +* Denis Pushkarev (core-js) +* David Clark (cosmicconfig) +* Felix Böhm (css handling code) +* Fredrik Nicol (css handling code) +* TJ Holowaychuk (js debugging code) +* Josh Junon (js debugging code) +* Aria Minaei (js dom convertor) +* Jason Quense (domain helpers for React and Bootstrap) +* Kilian Valkhof (electron to chromium convertor) +* Kiko Beats (emoji list) +* Trevor Brindle (environment info library) +* JD Ballard (node error handling) +* Stefan Penner and contributors (EMCA v6 promise handler) +* Andreas Lubbe (HTML escaping code) +* Tiancheng "Timothy" Gu (HTML escaping code) +* Arnout Kazemier (eventemitter3 library) +* Evgeny Poberezkin (JSON schema code) +* Ramesh Nair, http://www.hiddentao.com/ (fast levenshtein) +* Caio Gondim (fast memoize) +* Kasper Unn Weihe (fastest levenshtein) +* Jon Schlinkert (string handling) +* Robert Eisele (fraction.js) +* Raynos (function bind) +* Logan Smyth (gensync) +* Elan Shanker (globbing code) +* Isaac Z. Schlueter (node graceful-fs) +* Dulin Marat (html-entities) +* Juriy "kangax" Zaytsev (html minifier-terser) +* Chris Winberry (html parser2) +* Charlie Robbins (node html proxy) +* Jarrett Cruger (node html proxy) +* Steven Chim (http proxy middleware) +* Alexander Shtuchkin (iconv lite) +* Glen Maddern (icss-utils) +* Lee Byron (js-immutable) +* Tyler Kellen (gulp.js interpreter) + + + diff --git a/MAINTAINERS-OF-SUBSYSTEMS.md b/MAINTAINERS-OF-SUBSYSTEMS.md new file mode 100644 index 00000000..7ce7ab2f --- /dev/null +++ b/MAINTAINERS-OF-SUBSYSTEMS.md @@ -0,0 +1,14 @@ +# Subsystem Maintainers + +This is a list of who is responsible for which subsystem within the Spyderisk Open Project, including +within the system-modeller Git tree. Contacts for the maintainers are in the +[CONTRIBUTORS.md](./CONTRIBUTORS.md) file. 
+ +* Nic Fair - tutorial, examples and education design and delivery +* Ken Meacham - Web UI and Java SSM APIs; Java SSM release management +* Panos Melas - SSM Python adaptor; adaptor release management +* Stephen Phillips - SSM continuous integration; build & test systems; attack path tool; overview paper; documentation +* Sam Senior - mathematical theory of risk management; papers +* Dan Shearer - overall open project release management; physical infrastructure; documentation and tooling +* Mike Surridge - domain model maintainer; domain editing tool maintainer; risk calculator in Java SSM; theory and papers +* Steve Taylor - developing and maintaining new system models; developing new domain models diff --git a/README.md b/README.md index c2cd2b19..0d9b7c3d 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,121 @@ -# Spyderisk System Modeller +# The Spyderisk System Modeller software -The Spyderisk System Modeller (SSM) provides a thorough risk assessment of -complex systems making use of context and connectivity to take into account the -web of attack paths and secondary threat cascades in a system. +This is the main software part of the [Spyderisk Open Project](https://github.com/Spyderisk). -Spyderisk assists the user in following the risk assessment process defined in -ISO 27005 and thus supports the Information Security Management System defined -in ISO 27001. The Spyderisk System Modeller is a generic risk assessment tool -and must be configured with a model of a domain ("knowledgebase"), containing -the available asset types and relations, descriptions of the threats, the -possible security controls, and more. +The overall Spyderisk Project's aim is to: -The Spyderisk software does not come bundled with any particular knowledgebase; -this is configurable at build/deploy time, by putting one or more zip bundles -into the "knowledgebases" folder (described in more detail later). An example -knowledgebase has been developed for complex networked information systems, -which is available here: -https://github.com/Spyderisk/domain-network/packages/1826148 +> understand the trustworthiness of socio-technical systems by establishing an international Open Community supporting the research, development, use and support of open, effective, and accessible risk assessment methods, knowledge and tools. -The web-based graphical user interface guides the user through the following -steps: +The Spyderisk System Modeller software ("Spyderisk") provides a thorough +risk assessment of complex systems, applying our mathematical modelling to your +particular problem. This software is the result of +[15 years of history in risk assessment](https://github.com/Spyderisk/system-modeller/blob/dev/HISTORY.md) +of complex socio-technical systems. Starting in 2023 we publish everything under +open licenses: software source code, ontologies, domain model database, +online training, documentation, and academic papers. + +As of Mid-2024, the Spyderisk software is in early release, fully available but only working +in quite specific circumstances. This README file signposts you to the +different ways of trying out Spyderisk and learning about the theory behind it. + +If you are a researcher in the area of risk modelling including ontologies of +risk, or if you have a specific problem domain you need to solve (particularly +in cybersecurity or privacy) then Spyderisk could be for you. 
+ +# Contents + +* [Introduction](#introduction) +* [Important project information](#important-project-information) +* [What is the Spyderisk System Modeller?](#what-is-the-spyderisk-system-modeller) +* [Your next steps](#your-next-steps) +* [Process of using system-modeller](#process-of-using-system-modeller) + + +# Introduction + +This README relates to the [system-modeller source tree](https://github.com/Spyderisk/system-modeller), +which provides both a web service and a web-based user interface. +If you only wish to install and run Spyderisk and its +web GUI, see instead the [Spyderisk Deployment Project](https://github.com/Spyderisk/system-modeller-deployment). +Spyderisk will only build and run on Linux, however, it can be deployed to non-Linux systems +using Docker containers. Docker is used to provide a consistent build and test environment for +developers and for the continuous integration (CI) system. + +This source tree is for: + +* those who want to inspect or change the [Spyderisk source code](./src/main/java/uk/ac/soton/itinnovation/security/README.md) +* building and running Spyderisk from its source code +* reading all [Spyderisk technical papers](./docs/papers/README.md) in one place +* understanding the generous [Spyderisk open licensing](./LICENSES.md) +* reading the [schema explanation](./docs/triple-store-schema.md) for the [base ontology in RDF format](https://github.com/Spyderisk/system-modeller/blob/dev/src/main/resources/core.rdf) which ships with Spyderisk + +If you wish to interact programmatically with Spyderisk instead of using the +web GUI, the [Spyderisk Python adaptor](https://github.com/Spyderisk/system-modeller-adaptor) +may be for you. This is the way you can call the Spyderisk web service API to create, update, analyse and query +system models and integrate other tools. While this is also Spyderisk software development, it +is much higher-level than the source code of the Spyderisk application found in this tree. The +Java application creates the reasoner service which the Python adapter can interrogate. + +# Important project information + +Spyderisk is created by the [Spyderisk Contributors](./CONTRIBUTORS.md), freely +available under [Open Source terms](./LICENSE.md). Everyone is welcome, noting +our [basic rules of decent behaviour](./CODE-OF-CONDUCT.md) around Spyderisk, +which includes contact details if you want to report a behaviour problem. + +We try to make it easy to [contribute to Spyderisk](./CONTRIBUTING.md) whatever your skills. + +You can contact us by: +* [raising a GitHub Issue](https://github.com/Spyderisk/system-modeller/issues/new) +* emailing [team@spyderisk.org](mailto://team@spyderisk.org) + +# What is the Spyderisk System Modeller? + +The Spyderisk System Modeller is a generic risk assessment tool. Spyderisk +must be supplied with a model of a domain of study, which defines a +simplified version of the real world and the different threats and mitigations +that apply. The use case we have developed the most relates to cybersecurity +analysis, however the Spyderisk team also models risks in other areas including +medical devices and privacy. We call a domain model the "knowledgebase", and it +uses ontological methods. + +Spyderisk does not come bundled with any particular knowledgebase; this is +configurable at build/deploy time, by putting one or more zip bundles into the +"knowledgebases" folder (described below). 
We publish and +maintain our most advanced +[knowledgebase for complex networked systems](https://github.com/Spyderisk/domain-network/packages/1826148) +in its own GitHub repository. + +When using our knowledgebase for cybersecurity analysis, Spyderisk assists the user in following +the risk assessment process defined in ISO 27005 from the +[ISO 27001](https://en.wikipedia.org/wiki/ISO/IEC_27000-series) +of standards. We found the 27k standards do not have all the required concepts +for effective risk modelling, and our knowledgebase is significantly richer than +what is found in the standards. Since a Spyderisk knowledgebase is based on an underlying +ontology, we have created an ontology which is broadly compatible with the ISO27k terminology. + +The system-modeller tree has approximately 70k lines of Java code in the core service, +and another 20k of Java code for running tests. + + +# Your next steps + +If you only want to run a demo of the Spyderisk System Modeller and do not need to do any development, +then you need to follow the [Installing Docker](./INSTALL.md#installing-docker) section of [INSTALL.md](./INSTALL.md) +and then use the [Spyderisk Deployment project](https://github.com/Spyderisk/system-modeller-deployment). +We also [have two comprehensive online training courses](https://training.spyderisk.org/courses/), covering +both Risk Assessment and Using Spyderisk. + +From here, within this source tree, you may: + +* [compile and install Spyderisk from source code](./INSTALL.md) +* [start Spyderisk software development](./docs/development.md) + +# Process of using system-modeller + +Once installed, whether from the source code in this software tree or via +the Spyderisk deployment project, the graphical web user interface guides the +user through the following steps: 1. The user draws a model of their system model by dragging and dropping typed assets linked by typed relations onto a canvas. @@ -63,793 +159,4 @@ path does). For the operational risk assessment, the state of the system model must first be synchronised with the current operational state (for instance through integration via the API with OpenVAS or a SIEM). -This project provides both a web service and a web-based user interface. An -API is provided to create, update, analyse and query system models and -integrate other tools. - -Docker is used to provide a consistent build and test environment for -developers and for the continuous integration (CI) system. If you want to do a -demo of the Spyderisk System Modeller and do not need to do any development -then you need to refer to the [Installing Docker](#installing-docker) section -and then use the separate "system-modeller-deployment" project. - -Development of the software began in 2013, drawing on research dating back to -2008. It was open-sourced in early 2023. The research and development up to the -point of open sourcing was done solely by the [University of Southampton IT -Innovation Centre](http://www.it-innovation.soton.ac.uk/) in a variety of UK -and EU research projects. - -## Pre-requisites - -You will need `git`, `git-lfs`, `docker` and `docker-compose`. See below for -more detail. - -## Quick Start - -The following instructions assume that you have the pre-requisites installed -and working, and have cloned the repository. - -N.B. Prior to running the following commands, you should also ensure that you -have one or more knowledgebases (domain models) available for installation. 
-These are available as zip file "bundles", containing the domain model itself, -along with the icons and mapping file needed for generating a UI palette of -visual assets. - -An example knowledgebase is available at: - Here, you will -find the latest .zip bundle, at the bottom of the "Assets" list. This file -should be downloaded and copied into the system-modeller/knowledgebases folder. -Once Spyderisk has been started up (see instructions below), these zip files -will be automatically extracted and loaded into Spyderisk. - -Of course, you may choose not to install a default knowledgebase, however, when -the Spyderisk GUI first loads in your browser, you will be directed to load in -a new knowledgebase manually. - -1. `$ cd system-modeller` -2. `$ docker-compose up -d` -3. `$ docker-compose exec ssm bash` -4. `$ ./gradlew assemble bootTest` -5. Go to in your browser. -6. Login in using `testuser` or `testadmin` with password `password`. - -N.B. The links in the user interface for the attack graph image does not work in the development environment. Links to Keycloak account management functions and documentation do not work via port 8081 but do work via port 8089. - -Please also note that the default setup is to recreate all databases on initial -start-up. In order to persist any installed knowledgebases and created system -models, you should ensure that the environment variable `RESET_ON_START=false` prior to re-running `./gradlew assemble bootTest`. -In the bash shell, that can be done with `export RESET_ON_START=false`. -Alternatively, put `RESET_ON_START=false` in your `.env` file and "source" it with `set -a; source .env; set +a`. - -## Installing Docker - -Please see the [Docker website](https://www.docker.com/) for details. - -### Windows - -To use Docker in Windows you must enable Hyper-V. This means that you can no -longer use VirtualBox (used as Vagrant's hypervisor). - -Download and install "Docker Desktop". - -#### With WSL2 - -Docker Desktop integrates with WSL2 (Windows Sub-system for Linux v2). WSL2 -provides a Linux environment deeply integrated into Windows. As many -development tools are designed for Linux, using WSL2 can make things easier. -Docker provide [instructions for the Docker Desktop WLS2 -backend](https://docs.docker.com/docker-for-windows/wsl/) which should be -followed. - -As part of the WSL2 installation, [you choose and install a Linux distribution -to -use](https://docs.microsoft.com/en-us/windows/wsl/install-win10#step-6---install-your-linux-distribution-of-choice). - -Once WSL2 and Ubuntu are installed, open a terminal window of some sort and -type `wsl` to switch to your default WSL2 Linux distro. You will need to copy -your private SSH key into the `.ssh` folder in the distro. You can access your -`C:` drive with the path `/mnt/c`: - -```shell -wsl -cd -mkdir .ssh -chmod 700 .ssh -cp /mnt/c/path/to/your/key/id_rsa .ssh -chmod 600 .ssh/id_rsa -``` - -You may also want to limit the host resources that Docker Desktop is permitted -to use. This can be done with a `.wslconfig` file in your `%UserProfile` -folder, e.g.: - -``` -[wsl2] -memory=10GB -processors=8 -``` - -At this point you have a functional Linux system. Please skip to the Linux -sub-section for the rest of the instructions. - -#### Without WSL2 - -If you are not using WSL2, you will have to permit Docker Desktop to access the -location on your disk where you have the system-modeller cloned. 
Either (in -advance) add a file-share for "C:\" in the Docker Desktop UI or be more -specific to the area of the disk where the system-modeller is checked out. -Alternatively, wait for Docker Desktop to pop up a request for file sharing -when you execute the compose file. - -You must also configure resource usage in the Docker Desktop UI. Configure it -to: - -* have more memory, CPU, swap (e.g. all CPUs, 8GB memory, 2GB swap); - -### Linux - -Many Linux distributions already have Docker installed. The following command -will work in `apt` based systems such as Ubuntu. To install Docker: - -```shell -sudo apt-get install docker docker-compose -``` - -## Docker Concepts - -Docker manages individual containers and its configuration files by default are -called "Dockerfile". It is possible to manually orchestrate (and connect) -docker containers but it is easier to manage multiple containers using -"docker-compose". Docker Compose files by default are called -"docker-compose.yml" and they refer to docker images (either to be pulled from -a docker registry) or to local Dockerfiles. - -### Images - -Images are the equivalent of VM images. They are built in layers on top of -standard distributions (e.g. from Docker hub). Names and tags (e.g. -"postgres:9.6.19" or "mongo:latest"). Be careful with the "latest" tag as use -of it does not guarantee that you are actually getting the latest version - it -is just a string like any other. By convention it will be the latest but -perhaps not if it is retrieved from your local cache. - -Commands: - -* List all local images with `docker image ls` - -### Containers - -Containers are running instances of images. They can be paused, unpaused, -stopped and started once created or just destroyed and recreated. The state of -a container changes as it runs with processes writing to disk and updating -memory. Writing to disk creates a new layer in the image by default or changes -a persistent "volume" if it is defined (see below). If a container is paused -then all the processes are paused (with `SIGSTOP`) and can be resumed but if a -container is stopped (`SIGTERM` then `SIGKILL`) then the memory state is lost. - -Commands: - -* List containers that relate to the local `docker-compose.yml` file with - `docker-compose ps` -* List running containers with `docker container ls` or just `docker ps` -* List all containers with `docker container ls -a` or just `docker ps -a` -* Remove the containers that relate to the local `docker-compose.yml` file with - `docker-compose rm` -* Remove a container with `docker container rm ` -* Remove containers that are not running with `docker container prune` (be - careful!) - -### Volumes - -There are two main sorts of volume: - -1. where a folder from the host is mounted in the container (a "bind mount"); -2. where a volume is created in a private space used by Docker. There are - "named volumes" and "anonymous volumes" of this type. - -We use (1) to mount the source code from the host into the container so that -editing can be done on the host and building in the container. Volume type (2) -is used for folders in the container where the contents changes such as the -`build` folder and gradle cache. All volumes are persisted separately to the -container. Anonymous volumes are given enormous random identifiers so they are -hard to identify. Named volumes can be shared between two different executing -containers and can be reused between container instantiations. 
Anonymous -volumes are not reused, they are left as orphans when a container is destroyed. - -Named volumes for the databases are defined in `docker-compose.yml`. They are -called `jena`, `mongo-db` and `mongo-configdb`. When docker-compose creates the -volumes, it prefixes those names with the "project name" which by default is -the name of the folder containing the `docker-compose.yml` file. Therefore the -volume names are likely to be e.g. `system-modeller_jena`. - -**N.B. if you have two system-modeller folders with the same name then they -will end up using the same named volumes: this is almost certainly not what you -want.** - -Named volumes for build artifacts are defined in the -`docker-compose.override.yml` file. They cover the `gradle`, `npm` and `maven` -artifact folders. - -Commands: - -* List all volumes with `docker volume ls` -* Remove a volume with `docker volume rm ` -* Remove all volumes that are not used by a container (running or not) with - `docker volume prune` (it is sensible to do this periodically to save disk -space) - -### Networks - -Networks provide communication between containers. - -### Container registry - -Images (not containers actually) are stored in "registries". There is a cache -for images on your local machine and there are remote registries. We use Docker -Hub for standard images. - -## Use of Docker - -Docker: manages single containers. We have a multi-stage `Dockerfile` which -defines three different containers for the SSM: - -* The `ssm-dev` container holds the development environment (including Java, - Gradle, Maven, etc). It is the one used by developers who will be building -the SSM themselves with `gradle` commands. The basic strategy is to keep the -source code files on the host and mount them into the SSM container. In this -way, the build tools are isolated from the host but the source code can still -be edited easily in any editor on the host. -* The `ssm-build` container executes a clean build of the SSM and is used by - the CI system. -* The `ssm-production` container is created by the CI and is a light-weight - container with just the software necessary for executing the SSM. It can be -used for demos and is intended for "production" deployment. The "production" -image is built on any branch that the CI builds (e.g. master and dev). The -"production" image is used in the separate "system-modeller-deployment" -project. - -Docker-compose: orchestrates multiple containers. We use several files: - -* The `docker-compose.yml` defines the base orchestration of the SSM - development container with supporting services (e.g. a MongoDB container). -* The `docker-compose.override.yml` file is (by default) overlayed on top of - `docker-compose.yml` and adds in the development environment. -* The `docker-compose.test.yml` file is used (primarily by the CI pipeline) to - execute the integration tests. - -## Initialise for Development - -### Install git and git-lfs - -On an Ubuntu system: - -```shell -sudo apt-get update -sudo apt-get install git git-lfs -``` - -### Run an SSH Agent - -You should be using an SSH key to authenticate with GitLab. To avoid typing in -the private key password all the time, you should run an SSH agent which holds -the unencrypted private key in memory. In Windows you can use e.g. `pageant` -(part of Putty). 
In Linux (or WSL2) do: - -```shell -eval `ssh-agent` -ssh-add -``` - -### Clone the system-modeller Git Repository - -Cloning the `system-modeller` repository makes a copy of all the files (and -their history) on your local machine. If you are using WSL2 then you should -clone the repository within your chosen Linux distribution. - -```shell -git clone git@github.com:SPYDERISK/system-modeller.git -cd system-modeller -``` - -### Customise default Configuration Parameters (Optional Step) - -The default configuration of the Spyderisk service, including service ports and -credentials, can be customized through the '.env' file. To get started, please -make a copy of the provided `.env.template` file and rename it to `.env`. Then, -you can modify any of the default parameters in the `.env` file to match your -specific environment configuration. - -### Download and Install default Knowledgebase(s) - -Syderisk requires one or more knowledgebase (domain model) to be installed, -prior to being able to develop system models in the GUI. These are available as -zip file "bundles", containing the domain model itself, along with the icons -and mapping file needed for generating a UI palette of visual assets. - -An example knowledgebase is available at: -https://github.com/Spyderisk/domain-network/packages/1826148 Here, you will -find the latest .zip bundle, at the bottom of the "Assets" list. This file -should be downloaded and copied into the system-modeller/knowledgebases folder. -Once Spyderisk has been started up (i.e. via starting the containers), these -zip files will be automatically extracted and loaded into Spyderisk. - -Of course, you may choose not to install a default knowledgebase, however, when -the Spyderisk GUI first loads in your browser, you will be directed to load in -a new knowledgebase manually. - - -### Starting the Containers - -To optimise the build, configure Docker to use "buildkit": - -```shell -export DOCKER_BUILDKIT=1 -``` - -To bring the containers (ssm, mongo, keycloak) up and leave the terminal -attached with the log files tailed: - -```shell -docker-compose up -``` - -Alternatively, to bring the containers up and background (detach) the process: - -```shell -docker-compose up -d -``` - -The `docker-compose.yml` file does not set the `container_name` property for -the containers it creates. They therefore get named after the directory -containing the `docker-compose.yml` file (the "project name") along with the -identifier in the `docker-compose.yml` file and a digit (for uniqueness). The -directory containing the `docker-compose.yml` file will, by default, be called -`system-modeller` as that is the default name when doing `git clone`. Docker -Compose picks up this name and uses it as the "project name". If more than one -instance of the SSM is required on one host, an alternative project name is -needed: either by renaming the `system-modeller` folder (recommended) or by -using the `-p` flag in `docker-compose` (e.g. `docker-compose -p -up -d`) but you must remember to use this flag every time. 
- -### Getting a Shell - -To get a shell in the `ssm` container: - -```shell -docker-compose exec ssm bash -``` - -The equivalent `docker` command requires the full container name and also the -`-it` flags to attach an interactive terminal to the process, e.g.: - -```shell -docker exec -it system-modeller_ssm_1 bash -``` - -### Viewing logs - -To see the logs from a service and `tail` the log so that it updates, the -command is: - -```shell -docker-compose logs -f -``` - -Where `` could be e.g. `ssm`. - -### Port Mappings - -The various server ports in the container are mapped by Docker to ports on the -host. The default ports on the host are defined in `docker-compose.yml` and `docker-compose.override.yml`: - -* 3000: Nodejs (3000) on the `ssm` container -* 5005: Java debugging (5005) on the `ssm` container -* 8080: Keycloak (8080) on the `keycloak` container -* 8081: Tomcat (8081) on the `ssm` container -* 8089: Nginx (80) on the `proxy` container - -To change the ports mapping it is best to copy the `.env.template` file to `.env` and define the port numbers there. This is necessary if you need to run multiple instances of the service on the same host. - -The Nginx reverse proxy forwards requests to the appropriate container and also includes redirects for documentation links. Therefore, it is advised to use port 8089 - -*The rest of this document assumes the default port mapping.* - -To see the containers created by the `docker-compose` command along with their -ports: - -```shell -$ docker-compose ps -NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS -system-modeller-proxy-1 nginx:stable-alpine3.17 "/tmp/import/entrypo…" proxy 23 minutes ago Up 23 minutes 0.0.0.0:8089->80/tcp -system-modeller-keycloak-1 keycloak/keycloak:21.0 "/tmp/import/entrypo…" keycloak 23 minutes ago Up 23 minutes 0.0.0.0:8080->8080/tcp, 8443/tcp -system-modeller-mongo-1 mongo:5.0.16-focal "docker-entrypoint.s…" mongo 23 minutes ago Up 23 minutes 27017/tcp -system-modeller-ssm-1 system-modeller-ssm "tail -f /dev/null" ssm 23 minutes ago Up 23 minutes 0.0.0.0:3000->3000/tcp, 0.0.0.0:5005->5005/tcp, 0.0.0.0:8081->8081/tcp``` - -You might contrast that with a list of all containers on the host found through -the `docker` command: - -```shell -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -01cc2804cadf nginx:stable-alpine3.17 "/tmp/import/entrypo…" 24 minutes ago Up 24 minutes 0.0.0.0:8089->80/tcp system-modeller-proxy-1 -0a91f360c30b system-modeller-ssm "tail -f /dev/null" 24 minutes ago Up 24 minutes 0.0.0.0:3000->3000/tcp, 0.0.0.0:5005->5005/tcp, 0.0.0.0:8081->8081/tcp system-modeller-ssm-1 -1b27ac53ec18 keycloak/keycloak:21.0 "/tmp/import/entrypo…" 24 minutes ago Up 24 minutes 0.0.0.0:8080->8080/tcp, 8443/tcp system-modeller-keycloak-1 -a67ba45f70c5 mongo:5.0.16-focal "docker-entrypoint.s…" 24 minutes ago Up 24 minutes 27017/tcp system-modeller-mongo-1 -``` - -## Development - -The system-modeller source code is synchronised with the `ssm` container. This -means that you can use your favourite source code editor on your host machine -but still do the build and execution inside the `ssm` container. The -system-modeller folder is mounted at `/code` inside the `ssm` container. - -Other folders which are written to by the build/execution such as `build`, -`logs`, `jena-tdb` are not mounted from the host for performance reasons. They -may only easily be accessed from within the container. 
- -### Gradle Tasks - -The main `build.gradle` file has a few tasks defined as well as the standard -ones: - -* assemble: builds the WAR including compiling Java and bundling JS -* test: compiles the Java and runs the tests (`classes` and `testClasses`) -* build: does assemble and also does test -* bootDev: does Spring's `bootRun` task with the profile set to `dev` and - without any dependencies running -* bootTest: does Spring's `bootRun` task with the profile set to `test` and - building the webapp first -* `gradle :taskTree :` shows what `` will do (use `--no-repeat` to - remove repeated tasks) - -There is also a `build.gradle` in `src/main/webapp` for the web application. It -mostly runs `yarn` commands via the gradle yarn plugin (yarn itself is not -directly installed and available). - -As yarn is not available directly, to add or remove packages during development -use commands such as: - -* `gradle addPackage -P packName="react-dom@4.6.6"` -* `gradle addPackage -P packName="lodash" -P dev="true"` -* `gradle removePackage -P packName="react-dom"` -* `gradle install` - -After a commit that has changed the contents of `src/main/webapp/package.json`, -a `gradle install` is necessary to update the local cache. This runs a `yarn -install` which removes any unnecessary packages and installs the packages in -`package.json`, in addition to any new additions. `gradle build` only rebuilds -the cache of webapp from scratch if a new clean environment is found. - -To create the webapp environment from scratch, follow the steps below: - -1. `cd src/main/webapp` -2. `gradle clean` -3. `rm -rf src/main/webapp/node_modules` -4. `rm -rf src/main/webapp/.gradle` -5. `gradle install` - -### Keycloak - -The development environment initialises an *insecure* Keycloak service. The -Keycloak configuration is stored in `provisioning/keycloak/ssm.json` and: - -* creates a realm (`ssm-realm`) within which there is a `user` and `admin` role - defined; -* permits holders of the `admin` role to manage the realm's users; -* creates a client (`system-modeller`) and uses the - `KEYCLOAK_CREDENTIALS_SECRET` environment variable (defined in `.env`) to -insert a shared secret for communication with the system-modeller service; -* creates a user called `testuser` holding the `user` role, with password - `password`; -* creates a user called `testadmin` holding the `admin` role, with password - `password`. - -The Keycloak (master realm) administrator username and password is also defined -in `.env` and is admin/password. - -### Frontend Development - -[Get a shell](#getting-a-shell) on the `ssm` container, build the code and -start the backend server on port 8081 : - -```shell -docker-compose exec ssm bash -cd /code -./gradlew build -./gradlew bootDev -``` - -This starts a Tomcat servlet which handles API requests and also handles -requests for the simple HTML pages. Using `bootDev` is the same as doing -`./gradlew bootRun` but sets `spring.profiles.active` to `dev` which means that -the properties from `src/main/resources/application-dev.properties` are -overlayed on the standard property file. This is defined in the `bootDev` -target of `build.gradle`. Note that whereas `bootRun` would compile, `bootDev` -does not. - -The command does not exit until you press Ctrl-C at which point the server is -stopped. If necessary the backend can be recompiled with `./gradlew test` or -just `./gradlew classes` and the server started again with `./gradlew bootDev`. 
- -If `application.properties` changes then `./gradlew assemble` is needed to get -it into the webapp. - -[Get another shell](#getting-a-shell) on the `ssm` container and start the -frontend server on port 3000 (which will be used by e.g. the dashboard and -modeller pages): - -```shell -docker-compose exec ssm bash -cd /code -./gradlew start -``` - -Note that this gradle target is defined in `src/main/webapp/build.gradle`. It -starts the server defined in `src/main/webapp/server.js` which uses the Express -framework on top of NodeJS to serve the part of the SSM with the canvas in (the -main part). It proxies requests for other pages through to the Spring Java -backend. - -The command does not exit until you press Ctrl-C but upon doing so the NodeJS -server continues executing. There is another gradle task `./gradlew stopNode` -which kills all node processes. - -When running this NodeJS server, webpack compile events are listened for and -the client web page is automatically updated. Sometimes reloading the page in -browser is needed, but normally the hot reload works fine. - -Note: the ports 8081 and 3000 are hard-coded into the Express `server.js` file. -Any change to the port mapping needs to be reflected there. - -If, when running `./gradlew start` you get an error message about `Error: -listen EADDRINUSE: address already in use 0.0.0.0:3000` or similar, it is -likely that Node is already running. You might want to stop it and start it -again with `./gradlew stopNode start`. - -#### Debugging the Frontend - -It is recommended that you install the following plugins in Chrome (or similar -browser): - -* React Developer Tools: shows the React component hierarchy and each - component's (editable) state and props. -* Redux DevTools: show the application state, including how it changes with - events - -In VSCode (for instance), the following configuration in `.vscode/launch.json` -will enable debugging of the frontend from within VSCode (launch with F5): - -```json -{ - "version": "0.2.0", - "configurations": [ - { - "type": "pwa-chrome", - "request": "launch", - "name": "Launch Chrome against localhost:3000 for frontend dev", - "url": "http://localhost:3000/system-modeller", - "webRoot": "${workspaceFolder}/src/main/webapp", - } - ] -} -``` - -### Backend Development - -If the main web UI is not being changed then it is simpler not to run the -NodeJS server. - -Get a shell on the ssm container (see above). - -Build the code and start the backend server: - -```shell -docker-compose exec ssm bash -cd /code -./gradlew build -./gradlew bootTest -``` - -The bootTest target sets `spring.profiles.active` to `test` but it is not clear -that this has any effect (TODO). It also bundles the Javascript webapp and then -extracts the files. Finally it runs the `./gradlew boot` task which starts a -Tomcat servlet. As a result the whole SSM application works but the frontend is -served from static files that are not hot-reloaded. - -The SSM served by Tomcat can be accessed at - (via the proxy) or direct to Tomcat via port 8081. - -#### Debugging the Backend - -Add the flag `--debug-jvm` to any of the usual `gradle` commands and the JVM -will wait for a debugger to connect on guest port 5005. It is the equivalent of -adding `-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` to -the JVM command line. - -```shell -./gradlew bootTest --debug-jvm -``` - -Then connect to `localhost:5005` from your IDE. - -In VSCode, for instance, debugger connections are configured in -`.vscode/launch.json`. 
The necessary configuration is: - -```json -{ - "version": "0.2.0", - "configurations": [ - { - "type": "java", - "name": "Attach to Java debugger on localhost:5005 for backend dev", - "request": "attach", - "hostName": "localhost", - "port": 5005 - } - ] -} -``` - -### Shutdown and Persistence - -The containers can be paused (and unpaused) which pauses the processes inside -the container and thus releases host resources but does not lose process state: - -```shell -docker-compose pause -docker-compose unpause -``` - -The containers can be stopped (and started) which will kill all the processes -running in the container but leave the container present: - -```shell -docker-compose stop -docker-compose start -``` - -If you originally used `docker-compose up` to start the containers without -detaching (with `-d`) then `Ctrl-C` is the same as `docker-compose stop`. - -The `docker-compose down` command stops the containers, removes them and -removes the networks they were using. There are also optional parameters to -remove the volumes and images: - -```shell -docker-compose down -``` - -In all these cases, the (Docker disk) volumes are persisted and named volumes -will be reattached to new containers, during restart. Assuming that you have -`RESET_ON_START=false` in your `.env` file, this also means -that any knowledgebases (domain models), system models, palettes, etc will be -persisted after restarting the containers. - -If the intention is to recreate the databases or reinstall the default -knowledgebases, this may be done in the following ways: - -a) Use `docker-compose down -v`, then restart containers and Spyderisk as -normal, e.g. - -```shell -docker-compose down -v -docker-compose up -d -docker-compose exec ssm bash -./gradlew assemble bootTest -``` - -b) Leave containers running, but set `RESET_ON_START=true` then restart Spyderisk, e.g. - -```shell -docker-compose exec ssm bash -export RESET_ON_START=true -./gradlew assemble bootTest -``` - - -### Building a Spyderisk System Modeller Image - -Sometimes, to test something you need to build a "production" image of the sort -built by the CI pipeline. You can then for instance use the image in the -`system-modeller-deployment` project. - -To build a production image use something like: - -`docker build --tag my-ssm-image --build-arg BUILDKIT_INLINE_CACHE=1 --file Dockerfile --target ssm-production .` - -If you need to test the image in the `system-modeller-deployment` project then -just edit the `docker-compose.yml` file in that project to reference -`my-ssm-image` instead of the image held remotely, e.g.: - -```yaml - ssm: - image: my-ssm-image:latest -``` - -When you're done with the image, remove it with `docker image rm my-ssm-image`. - -## Licences - -The [license finder](https://github.com/pivotal/LicenseFinder) software should -be used to find the licences of 3rd-party code. It is not installed in the dev -image by default. - -### Installation - -Install `license_finder`: - -```shell -apt-get install ruby -gem install license_finder -``` - -To use `license_finder` in the `webapp` folder, `yarn` (and therefore `npm`) is -also required (rather than the versions built in to the `gradle` plugin): - -```shell -apt-get install nodejs -apt-get install npm -npm install --global yarn -``` - -### Usage - -Decisions on which licences are approved (and why) are kept in the top-level -`dependency_decisions.yml` file. 
- -To find all licences and check them against the approved list: - -```shell -cd /code -license_finder --decisions-file=/code/dependency_decisions.yml -cd /code/src/main/webapp -license_finder --decisions-file=/code/dependency_decisions.yml -``` - -To generate an HTML report, use the command: - -```shell -license_finder report --format html --quiet --aggregate-paths /code /code/src/main/webapp --decisions-file=/code/dependency_decisions.yml > licences.html -``` - -To generate a CSV report, use the command: - -```shell -license_finder report --quiet --aggregate-paths /code /code/src/main/webapp --decisions-file=/code/dependency_decisions.yml --columns name version authors licenses license_links approved homepage package_manager --write-headers > licences.csv -``` - -## OpenAPI - -The OpenAPI v3 documentation is automatically generated from the Java service -code and is available from a live service at - -* `http://server:port/context-path/v3/api-docs`, e.g. - `/system-modeller/v3/api-docs` (for JSON) -* `http://server:port/context-path/v3/api-docs.yaml`, e.g. - `/system-modeller/v3/api-docs.yaml` (for YAML) - -The Swagger UI is also available for browsing the API: - -* `http://server:port/context-path/swagger-ui.html`, e.g. - `/system-modeller/swagger-ui.html` - -The file [openAPI-3-schema.YAML](docs/openapi/openAPI-3-schema.YAML) in this -repository is created by hand by combining the autogenerated YAML file along -with the first few lines of the existing file. - -Note that the object fields `aLabel`, `mLabel` and `rLabel` used in -`MisbehaviourSet` and `Node` are inconsistent between the OpenAPI file and the -JSON returned by the service. The OpenAPI file suggests they are all lower-case -but in the JSON they are camelCase (`aLabel` etc). To auto-generate effective -client code from the OpenAPI document it may be necessary to first replace -`alabel` with `aLabel` and so on. - -Another change that may be necessary is to replace `date-time` with `int64` -where the following fragment is found: -```yaml -created: - type: string - format: date-time -``` diff --git a/build.gradle b/build.gradle index 73341bfb..472caf6d 100644 --- a/build.gradle +++ b/build.gradle @@ -46,7 +46,7 @@ project.buildDir = "${project.projectDir}/build/build" def gitCommit = "git rev-parse HEAD".execute().text.trim() def systemModellerName = "system-modeller" -def systemModellerVersion = "3.5.0-SNAPSHOT" +def systemModellerVersion = "3.5.0" def javaVersion = 1.8 version = systemModellerVersion // picked up by sonarqube diff --git a/docs/BUILD_EXPLANATION.md b/docs/BUILD_EXPLANATION.md index 1841727e..7afbc77a 100644 --- a/docs/BUILD_EXPLANATION.md +++ b/docs/BUILD_EXPLANATION.md @@ -4,7 +4,7 @@ ### Outline: -![Simple outline of how the elements interact with each other](JS_BUILD_DIAGRAM.png) +![Simple outline of how the elements interact with each other](./images/JS_BUILD_DIAGRAM.png) ### Tools: diff --git a/docs/Spyderisk-Open-Project-Strategy.md b/docs/Spyderisk-Open-Project-Strategy.md new file mode 100755 index 00000000..319b9cd5 --- /dev/null +++ b/docs/Spyderisk-Open-Project-Strategy.md @@ -0,0 +1,33 @@ +# Open Project Strategy + +![Spyderisk Open Project](./images/spyderisk-logo.png) + +## Introduction + +This document defines the strategy for the [Spyderisk Open Project](https://github.com/Spyderisk) defining the vision, aims and strategic objectives. The strategy aims to: + +* communicate openly to collaborators what we are aiming to achieve. 
+* provide structure, steering and guidance to collaborator activities and decision making. +* monitor progress against our strategic objectives for all stakeholders. + +## Vision + +A world of trustworthy socio-technical systems responsibly designed, developed and operated using open, effective, and accessible risk assessment: + +* socio-technical systems come with standardised risk assessment models describing trustworthiness assumptions and security controls. +* system developers and users compose risk assessment models using Spyderisk methods, knowledge and tools to carry out risk assessment. +* tools are fast and easy to use, and their output is reproducible and explainable. +* tools and risk knowledge are co-produced by the community reflecting input from all stakeholders. + +## Aim + +The Spyderisk Open Project aims to revolutionise understanding of the trustworthiness of socio-technical systems by establishing an international Open Community supporting the research, development, use and support of open, effective, and accessible risk assessment methods, knowledge and tools. + +## Strategic objectives + +1. Make risk assessment of socio-technical systems reliable, open and accessible to all as a public good, free and easy to use. +2. Foster a diverse and inclusive community of users and contributors who offer a meaningful voice and language for socio-technical risk assessment that reflects this diversity. +3. Openly share, curate and engineer published machine-readable and explainable knowledge describing socio-technical risks and their causes, effects and controls to manage them. +4. Develop effective risk assessment methods and tooling, including alternative implementations, co-produced and used by the community to tackle existing and emerging socio-technical risks. +5. Ensure project stewardship is conducted openly by diverse and representative stakeholders for the benefit of communities. + diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 00000000..18b01149 --- /dev/null +++ b/docs/development.md @@ -0,0 +1,593 @@ +# Spyderisk development + +This document is about development on the Spyderisk web application and backend +service, written in Java (and Javascript, of course.) It is not for development +using the Spyderisk Python adaptor, and it is not for development of Spyderisk +domain models. 
+
+# Contents
+
+* [Initialise for Development](#initialise-for-development)
+  * [Install git and git-lfs](#install-git-and-git-lfs)
+  * [Run an SSH Agent](#run-an-ssh-agent)
+  * [Clone the system-modeller Git Repository](#clone-the-system-modeller-git-repository)
+  * [Customise default Configuration Parameters (Optional Step)](#customise-default-configuration-parameters-optional-step)
+  * [Download and Install default Knowledgebase(s)](#download-and-install-default-knowledgebases)
+  * [Starting the Containers](#starting-the-containers)
+  * [Getting a Shell](#getting-a-shell)
+  * [Viewing logs](#viewing-logs)
+  * [Port Mappings](#port-mappings)
+* [Spyderisk application development](#spyderisk-application-development)
+  * [Gradle Tasks](#gradle-tasks)
+  * [Keycloak](#keycloak)
+  * [Frontend Development](#frontend-development)
+  * [Debugging the Frontend](#debugging-the-frontend)
+  * [Backend Development](#backend-development)
+  * [Debugging the Backend](#debugging-the-backend)
+  * [Shutdown and Persistence](#shutdown-and-persistence)
+  * [Building a Spyderisk System Modeller Image](#building-a-spyderisk-system-modeller-image)
+* [OpenAPI](#openapi)
+* [License checks](#license-checks)
+  * [The LicenseFinder tool](#the-licensefinder-tool)
+  * [Installation](#installation)
+  * [Usage](#usage)
+
+# Initialise for Development
+
+## Install git and git-lfs
+
+On an Ubuntu system:
+
+```shell
+sudo apt-get update
+sudo apt-get install git git-lfs
+```
+
+## Run an SSH Agent
+
+You should be using an SSH key to authenticate with GitHub. To avoid typing in
+the private key password all the time, you should run an SSH agent which holds
+the unencrypted private key in memory. In Windows you can use e.g. `pageant`
+(part of Putty). In Linux (or WSL2) do:
+
+```shell
+eval `ssh-agent`
+ssh-add
+```
+
+## Clone the system-modeller Git Repository
+
+Cloning the `system-modeller` repository makes a copy of all the files (and
+their history) on your local machine. If you are using WSL2 then you should
+clone the repository within your chosen Linux distribution.
+
+```shell
+git clone git@github.com:SPYDERISK/system-modeller.git
+cd system-modeller
+```
+
+## Customise default Configuration Parameters (Optional Step)
+
+The default configuration of the Spyderisk service, including service ports and
+credentials, can be customised through the `.env` file. To get started, please
+make a copy of the provided `.env.template` file and rename it to `.env`. Then,
+you can modify any of the default parameters in the `.env` file to match your
+specific environment configuration.
+
+## Download and Install default Knowledgebase(s)
+
+Spyderisk requires one or more knowledgebases (domain models) to be installed
+before you can develop system models in the GUI. These are available as
+zip file "bundles", containing the domain model itself, along with the icons
+and mapping file needed for generating a UI palette of visual assets.
+
+An example knowledgebase is available at
+https://github.com/Spyderisk/domain-network/packages/1826148 where you will
+find the latest .zip bundle at the bottom of the "Assets" list. This file
+should be downloaded and copied into the system-modeller/knowledgebases folder.
+Once Spyderisk has been started up (i.e. via starting the containers), these
+zip files will be automatically extracted and loaded into Spyderisk.
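+
+For example, if you have downloaded a bundle into your `Downloads` folder, a
+minimal install sketch is shown below; the zip filename is only a placeholder,
+so substitute the name of the bundle you actually downloaded.
+
+```shell
+# Copy a downloaded knowledgebase bundle into the folder that Spyderisk scans
+# at start-up. The filename below is hypothetical - use your real bundle name.
+cd system-modeller
+mkdir -p knowledgebases   # harmless if the folder already exists in your checkout
+cp ~/Downloads/domain-network-example.zip knowledgebases/
+```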
+ +Of course, you may choose not to install a default knowledgebase, however, when +the Spyderisk GUI first loads in your browser, you will be directed to load in +a new knowledgebase manually. + +## Starting the Containers + +To optimise the build, configure Docker to use "buildkit": + +```shell +export DOCKER_BUILDKIT=1 +``` + +To bring the containers (ssm, mongo, keycloak) up and leave the terminal +attached with the log files tailed: + +```shell +docker-compose up +``` + +Alternatively, to bring the containers up and background (detach) the process: + +```shell +docker-compose up -d +``` + +The `docker-compose.yml` file does not set the `container_name` property for +the containers it creates. They therefore get named after the directory +containing the `docker-compose.yml` file (the "project name") along with the +identifier in the `docker-compose.yml` file and a digit (for uniqueness). The +directory containing the `docker-compose.yml` file will, by default, be called +`system-modeller` as that is the default name when doing `git clone`. Docker +Compose picks up this name and uses it as the "project name". If more than one +instance of the SSM is required on one host, an alternative project name is +needed: either by renaming the `system-modeller` folder (recommended) or by +using the `-p` flag in `docker-compose` (e.g. `docker-compose -p +up -d`) but you must remember to use this flag every time. + +## Getting a Shell + +To get a shell in the `ssm` container: + +```shell +docker-compose exec ssm bash +``` + +The equivalent `docker` command requires the full container name and also the +`-it` flags to attach an interactive terminal to the process, e.g.: + +```shell +docker exec -it system-modeller_ssm_1 bash +``` + +## Viewing logs + +To see the logs from a service and `tail` the log so that it updates, the +command is: + +```shell +docker-compose logs -f +``` + +Where `` could be e.g. `ssm`. + +## Port Mappings + +The various server ports in the container are mapped by Docker to ports on the +host. The default ports on the host are defined in `docker-compose.yml` and `docker-compose.override.yml`: + +* 3000: Nodejs (3000) on the `ssm` container +* 5005: Java debugging (5005) on the `ssm` container +* 8080: Keycloak (8080) on the `keycloak` container +* 8081: Tomcat (8081) on the `ssm` container +* 8089: Nginx (80) on the `proxy` container + +To change the ports mapping it is best to copy the `.env.template` file to `.env` and define the port numbers there. This is necessary if you need to run multiple instances of the service on the same host. + +The Nginx reverse proxy forwards requests to the appropriate container and also includes redirects for documentation links. 
Therefore, it is advised to use port 8089 + +*The rest of this document assumes the default port mapping.* + +To see the containers created by the `docker-compose` command along with their +ports: + +```shell +$ docker-compose ps +NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS +system-modeller-proxy-1 nginx:stable-alpine3.17 "/tmp/import/entrypo…" proxy 23 minutes ago Up 23 minutes 0.0.0.0:8089->80/tcp +system-modeller-keycloak-1 keycloak/keycloak:21.0 "/tmp/import/entrypo…" keycloak 23 minutes ago Up 23 minutes 0.0.0.0:8080->8080/tcp, 8443/tcp +system-modeller-mongo-1 mongo:5.0.16-focal "docker-entrypoint.s…" mongo 23 minutes ago Up 23 minutes 27017/tcp +system-modeller-ssm-1 system-modeller-ssm "tail -f /dev/null" ssm 23 minutes ago Up 23 minutes 0.0.0.0:3000->3000/tcp, 0.0.0.0:5005->5005/tcp, 0.0.0.0:8081->8081/tcp``` + +You might contrast that with a list of all containers on the host found through +the `docker` command: + +```shell +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +01cc2804cadf nginx:stable-alpine3.17 "/tmp/import/entrypo…" 24 minutes ago Up 24 minutes 0.0.0.0:8089->80/tcp system-modeller-proxy-1 +0a91f360c30b system-modeller-ssm "tail -f /dev/null" 24 minutes ago Up 24 minutes 0.0.0.0:3000->3000/tcp, 0.0.0.0:5005->5005/tcp, 0.0.0.0:8081->8081/tcp system-modeller-ssm-1 +1b27ac53ec18 keycloak/keycloak:21.0 "/tmp/import/entrypo…" 24 minutes ago Up 24 minutes 0.0.0.0:8080->8080/tcp, 8443/tcp system-modeller-keycloak-1 +a67ba45f70c5 mongo:5.0.16-focal "docker-entrypoint.s…" 24 minutes ago Up 24 minutes 27017/tcp system-modeller-mongo-1 +``` + +# Spyderisk application development + +The system-modeller source code is synchronised with the `ssm` container. This +means that you can use your favourite source code editor on your host machine +but still do the build and execution inside the `ssm` container. The +system-modeller folder is mounted at `/code` inside the `ssm` container. + +Other folders which are written to by the build/execution such as `build`, +`logs`, `jena-tdb` are not mounted from the host for performance reasons. They +may only easily be accessed from within the container. + +## Gradle Tasks + +The main `build.gradle` file has a few tasks defined as well as the standard +ones: + +* assemble: builds the WAR including compiling Java and bundling JS +* test: compiles the Java and runs the tests (`classes` and `testClasses`) +* build: does assemble and also does test +* bootDev: does Spring's `bootRun` task with the profile set to `dev` and + without any dependencies running +* bootTest: does Spring's `bootRun` task with the profile set to `test` and + building the webapp first +* `gradle :taskTree :` shows what `` will do (use `--no-repeat` to + remove repeated tasks) + +There is also a `build.gradle` in `src/main/webapp` for the web application. It +mostly runs `yarn` commands via the gradle yarn plugin (yarn itself is not +directly installed and available). + +As yarn is not available directly, to add or remove packages during development +use commands such as: + +* `gradle addPackage -P packName="react-dom@4.6.6"` +* `gradle addPackage -P packName="lodash" -P dev="true"` +* `gradle removePackage -P packName="react-dom"` +* `gradle install` + +After a commit that has changed the contents of `src/main/webapp/package.json`, +a `gradle install` is necessary to update the local cache. This runs a `yarn +install` which removes any unnecessary packages and installs the packages in +`package.json`, in addition to any new additions. 
`gradle build` only rebuilds the webapp cache from scratch if it finds a new,
clean environment.

To create the webapp environment from scratch, follow the steps below:

1. `cd src/main/webapp`
2. `gradle clean`
3. `rm -rf node_modules`
4. `rm -rf .gradle`
5. `gradle install`

## Keycloak

The development environment initialises an *insecure* Keycloak service. The
Keycloak configuration is stored in `provisioning/keycloak/ssm.json` and:

* creates a realm (`ssm-realm`) within which `user` and `admin` roles are
  defined;
* permits holders of the `admin` role to manage the realm's users;
* creates a client (`system-modeller`) and uses the
  `KEYCLOAK_CREDENTIALS_SECRET` environment variable (defined in `.env`) to
  insert a shared secret for communication with the system-modeller service;
* creates a user called `testuser` holding the `user` role, with password
  `password`;
* creates a user called `testadmin` holding the `admin` role, with password
  `password`.

The Keycloak (master realm) administrator username and password are also
defined in `.env` and default to `admin`/`password`.

## Frontend Development

[Get a shell](#getting-a-shell) on the `ssm` container, build the code and
start the backend server on port 8081:

```shell
docker-compose exec ssm bash
cd /code
./gradlew build
./gradlew bootDev
```

This starts a Tomcat servlet which handles API requests and also handles
requests for the simple HTML pages. Using `bootDev` is the same as doing
`./gradlew bootRun` but sets `spring.profiles.active` to `dev`, which means
that the properties from `src/main/resources/application-dev.properties` are
overlaid on the standard property file. This is defined in the `bootDev`
target of `build.gradle`. Note that whereas `bootRun` would compile, `bootDev`
does not.

The command does not exit until you press Ctrl-C, at which point the server is
stopped. If necessary, the backend can be recompiled with `./gradlew test` or
just `./gradlew classes` and the server started again with `./gradlew bootDev`.

If `application.properties` changes then `./gradlew assemble` is needed to get
it into the webapp.

[Get another shell](#getting-a-shell) on the `ssm` container and start the
frontend server on port 3000 (which will be used by e.g. the dashboard and
modeller pages):

```shell
docker-compose exec ssm bash
cd /code
./gradlew start
```

Note that this gradle target is defined in `src/main/webapp/build.gradle`. It
starts the server defined in `src/main/webapp/server.js`, which uses the
Express framework on top of NodeJS to serve the part of the SSM containing the
canvas (the main part). It proxies requests for other pages through to the
Spring Java backend.

The command does not exit until you press Ctrl-C, but upon doing so the NodeJS
server continues executing. There is another gradle task, `./gradlew stopNode`,
which kills all node processes.

When this NodeJS server is running, it listens for webpack compile events and
automatically updates the client web page. Sometimes reloading the page in the
browser is needed, but normally the hot reload works fine.

Note: the ports 8081 and 3000 are hard-coded into the Express `server.js` file.
Any change to the port mapping needs to be reflected there.

If, when running `./gradlew start`, you get an error message about `Error:
listen EADDRINUSE: address already in use 0.0.0.0:3000` or similar, it is
likely that Node is already running.
You might want to stop it and start it
again with `./gradlew stopNode start`.

### Debugging the Frontend

It is recommended that you install the following plugins in Chrome (or a
similar browser):

* React Developer Tools: shows the React component hierarchy and each
  component's (editable) state and props.
* Redux DevTools: shows the application state, including how it changes with
  events.

In VSCode (for instance), the following configuration in `.vscode/launch.json`
will enable debugging of the frontend from within VSCode (launch with F5):

```json
{
    "version": "0.2.0",
    "configurations": [
        {
            "type": "pwa-chrome",
            "request": "launch",
            "name": "Launch Chrome against localhost:3000 for frontend dev",
            "url": "http://localhost:3000/system-modeller",
            "webRoot": "${workspaceFolder}/src/main/webapp"
        }
    ]
}
```

## Backend Development

If the main web UI is not being changed then it is simpler not to run the
NodeJS server.

Get a shell on the `ssm` container (see above).

Build the code and start the backend server:

```shell
docker-compose exec ssm bash
cd /code
./gradlew build
./gradlew bootTest
```

The `bootTest` target sets `spring.profiles.active` to `test` but it is not
clear that this has any effect (TODO). It also bundles the Javascript webapp
and then extracts the files. Finally it runs the `./gradlew boot` task which
starts a Tomcat servlet. As a result the whole SSM application works, but the
frontend is served from static files that are not hot-reloaded.

The SSM served by Tomcat can be accessed via the proxy (port 8089 by default)
or directly from Tomcat on port 8081.

### Debugging the Backend

Add the flag `--debug-jvm` to any of the usual `gradle` commands and the JVM
will wait for a debugger to connect on port 5005 inside the container (mapped
to port 5005 on the host by default). It is the equivalent of adding
`-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005` to
the JVM command line.

```shell
./gradlew bootTest --debug-jvm
```

Then connect to `localhost:5005` from your IDE.

In VSCode, for instance, debugger connections are configured in
`.vscode/launch.json`. The necessary configuration is:

```json
{
    "version": "0.2.0",
    "configurations": [
        {
            "type": "java",
            "name": "Attach to Java debugger on localhost:5005 for backend dev",
            "request": "attach",
            "hostName": "localhost",
            "port": 5005
        }
    ]
}
```

## Shutdown and Persistence

The containers can be paused (and unpaused), which pauses the processes inside
the container and thus releases host resources but does not lose process state:

```shell
docker-compose pause
docker-compose unpause
```

The containers can be stopped (and started), which will kill all the processes
running in the container but leave the container present:

```shell
docker-compose stop
docker-compose start
```

If you originally used `docker-compose up` to start the containers without
detaching (with `-d`) then `Ctrl-C` is the same as `docker-compose stop`.

The `docker-compose down` command stops the containers, removes them and
removes the networks they were using. There are also optional flags to remove
the volumes (`-v`) and images (`--rmi`):

```shell
docker-compose down
```

In all these cases, the (Docker disk) volumes are persisted, and named volumes
will be reattached to new containers when they are recreated.
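One way to check which named volumes are currently present on the host (and
would therefore be reattached) is the standard Docker command:

```shell
docker volume ls   # Compose-created volumes are prefixed with the project name
```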
Assuming that you have
`reset.on.start=false` in your `application.properties` file, this persistence
also means that any knowledgebases (domain models), system models, palettes,
etc. will still be there after restarting the containers.

If the intention is to recreate the databases or reinstall the default
knowledgebases, this may be done in the following ways:

a) Use `docker-compose down -v`, then restart the containers and Spyderisk as
normal, e.g.

```shell
docker-compose down -v
docker-compose up -d
docker-compose exec ssm bash
./gradlew assemble bootTest
```

b) Leave the containers running, but set `reset.on.start=true` in your
`application.properties` file, then restart Spyderisk, e.g.

```shell
docker-compose exec ssm bash
./gradlew assemble bootTest
```

## Building a Spyderisk System Modeller Image

Sometimes, to test something, you need to build a "production" image of the
sort built by the CI pipeline. You can then, for instance, use the image in the
`system-modeller-deployment` project.

To build a production image, use something like:

`docker build --tag my-ssm-image --build-arg BUILDKIT_INLINE_CACHE=1 --file Dockerfile --target ssm-production .`

If you need to test the image in the `system-modeller-deployment` project,
then just edit the `docker-compose.yml` file in that project to reference
`my-ssm-image` instead of the image held remotely, e.g.:

```yaml
  ssm:
    image: my-ssm-image:latest
```

When you're done with the image, remove it with `docker image rm my-ssm-image`.

# OpenAPI

The OpenAPI v3 documentation is automatically generated from the Java service
code and is available from a live service at:

* `http://server:port/context-path/v3/api-docs`, e.g.
  `/system-modeller/v3/api-docs` (for JSON)
* `http://server:port/context-path/v3/api-docs.yaml`, e.g.
  `/system-modeller/v3/api-docs.yaml` (for YAML)

The Swagger UI is also available for browsing the API:

* `http://server:port/context-path/swagger-ui.html`, e.g.
  `/system-modeller/swagger-ui.html`

The file [openAPI-3-schema.YAML](docs/openapi/openAPI-3-schema.YAML) in this
repository is created by hand by combining the autogenerated YAML file with the
first few lines of the existing file.

Note that the object fields `aLabel`, `mLabel` and `rLabel` used in
`MisbehaviourSet` and `Node` are inconsistent between the OpenAPI file and the
JSON returned by the service. The OpenAPI file suggests they are all lower-case
but in the JSON they are camelCase (`aLabel` etc). To auto-generate effective
client code from the OpenAPI document it may be necessary to first replace
`alabel` with `aLabel` and so on.

Another change that may be necessary is to replace `date-time` with `int64`
where the following fragment is found:

```yaml
created:
  type: string
  format: date-time
```

# License checks

License compliance is a necessary part of software development.

The Spyderisk Project takes responsibility for the license compliance of what
we ship. We use a large stack of npm/yarn code that changes without our
knowledge or permission, and there are different licenses within these stacks.
By default we believe what npm and yarn tell us:

```shell
$ npx license-checker --summary
$ yarn licenses list
```

However, these tools are only as good as their inputs, and in any case they
explicitly disclaim being authoritative.
There are many handy helper tools such as +[npm License Tracker](https://github.com/amittkSharma/npm-license-tracker) +[Repository License Crawler](https://github.com/sinipelto/repo-license-crawler) +which will examine Python, npm etc application trees and give an opinion on which licenses are in use. + +We have documented experiments with one particular helper tool as follows to assist with the +[statements we make about licenses](../LICENSE.md). + +## The LicenseFinder tool + +The [license finder](https://github.com/pivotal/LicenseFinder) software can +generate some opinions about the licences of 3rd-party Javascript code. + +### Installation + +Install `license_finder`: + +```shell +apt-get install ruby +gem install license_finder +``` + +To use `license_finder` in the `webapp` folder, `yarn` (and therefore `npm`) is +also required (rather than the versions built in to the `gradle` plugin): + +```shell +apt-get install nodejs +apt-get install npm +npm install --global yarn +``` + +### Usage + +Decisions on which licences are approved (and why) are kept in the top-level +`dependency_decisions.yml` file. + +To find all licences and check them against the approved list: + +```shell +cd /code +license_finder --decisions-file=/code/dependency_decisions.yml +cd /code/src/main/webapp +license_finder --decisions-file=/code/dependency_decisions.yml +``` + +To generate an HTML report, use the command: + +```shell +license_finder report --format html --quiet --aggregate-paths /code /code/src/main/webapp --decisions-file=/code/dependency_decisions.yml > licences.html +``` + +To generate a CSV report, use the command: + +```shell +license_finder report --quiet --aggregate-paths /code /code/src/main/webapp --decisions-file=/code/dependency_decisions.yml --columns name version authors licenses license_links approved homepage package_manager --write-headers > licences.csv +``` + + diff --git a/docs/JS_BUILD_DIAGRAM.png b/docs/images/JS_BUILD_DIAGRAM.png similarity index 100% rename from docs/JS_BUILD_DIAGRAM.png rename to docs/images/JS_BUILD_DIAGRAM.png diff --git a/docs/images/corerdf-asserted-asset.png b/docs/images/corerdf-asserted-asset.png new file mode 100644 index 00000000..eff23350 Binary files /dev/null and b/docs/images/corerdf-asserted-asset.png differ diff --git a/docs/images/corerdf-control-sets.png b/docs/images/corerdf-control-sets.png new file mode 100644 index 00000000..42e471d0 Binary files /dev/null and b/docs/images/corerdf-control-sets.png differ diff --git a/docs/images/corerdf-control-strategy.png b/docs/images/corerdf-control-strategy.png new file mode 100644 index 00000000..4d5b4a70 Binary files /dev/null and b/docs/images/corerdf-control-strategy.png differ diff --git a/docs/images/corerdf-inferred-asset.png b/docs/images/corerdf-inferred-asset.png new file mode 100644 index 00000000..35c2d693 Binary files /dev/null and b/docs/images/corerdf-inferred-asset.png differ diff --git a/docs/images/corerdf-misbehaviour-sets.png b/docs/images/corerdf-misbehaviour-sets.png new file mode 100644 index 00000000..325a767f Binary files /dev/null and b/docs/images/corerdf-misbehaviour-sets.png differ diff --git a/docs/images/corerdf-threat-graph-causality.png b/docs/images/corerdf-threat-graph-causality.png new file mode 100644 index 00000000..8467a3aa Binary files /dev/null and b/docs/images/corerdf-threat-graph-causality.png differ diff --git a/docs/images/corerdf-threat-graph.png b/docs/images/corerdf-threat-graph.png new file mode 100644 index 00000000..99681ca2 Binary files /dev/null 
and b/docs/images/corerdf-threat-graph.png differ diff --git a/docs/images/corerdf-trustworthiness-attribute-sets.png b/docs/images/corerdf-trustworthiness-attribute-sets.png new file mode 100644 index 00000000..29c37054 Binary files /dev/null and b/docs/images/corerdf-trustworthiness-attribute-sets.png differ diff --git a/docs/images/corerdf-trustworthiness-impact-set.png b/docs/images/corerdf-trustworthiness-impact-set.png new file mode 100644 index 00000000..3517f94f Binary files /dev/null and b/docs/images/corerdf-trustworthiness-impact-set.png differ diff --git a/docs/images/serscis-base-ontology.png b/docs/images/serscis-base-ontology.png new file mode 100644 index 00000000..12a5d4a4 Binary files /dev/null and b/docs/images/serscis-base-ontology.png differ diff --git a/docs/images/spyderisk-logo.png b/docs/images/spyderisk-logo.png new file mode 100644 index 00000000..dc5658c3 Binary files /dev/null and b/docs/images/spyderisk-logo.png differ diff --git a/docs/openapi/openAPI-3-schema.YAML b/docs/openapi/openAPI-3-schema.YAML index 99f65d21..6cf636cb 100644 --- a/docs/openapi/openAPI-3-schema.YAML +++ b/docs/openapi/openAPI-3-schema.YAML @@ -1,7 +1,7 @@ openapi: 3.0.1 info: title: OpenAPI definition - version: v3.4.0 + version: v3.5.0 description: SPYDERISK System Modeller (SSM) REST API definitions for domain models, user system models and usage by other applications. contact: name: University of Southampton IT Innovation Centre @@ -962,6 +962,38 @@ paths: '*/*': schema: $ref: '#/components/schemas/CreateRelationResponse' + /models/{modelId}/recommendations/{jobId}/cancel: + post: + tags: + - model-controller + operationId: cancelRecJob + parameters: + - name: modelId + in: path + required: true + schema: + type: string + - name: jobId + in: path + required: true + schema: + type: string + responses: + "200": + description: OK + content: + '*/*': + schema: + type: string + enum: + - CREATED + - STARTED + - RUNNING + - FAILED + - FINISHED + - ABORTED + - TIMED_OUT + - UNKNOWN /models/{modelId}/misbehaviours/{misbehaviourId}/revert-impact: post: tags: @@ -1812,6 +1844,114 @@ paths: '*/*': schema: type: object + /models/{modelId}/recommendations: + get: + tags: + - model-controller + operationId: calculateRecommendations + parameters: + - name: modelId + in: path + required: true + schema: + type: string + - name: riskMode + in: query + required: false + schema: + type: string + default: CURRENT + - name: localSearch + in: query + required: false + schema: + type: boolean + default: true + - name: acceptableRiskLevel + in: query + required: true + schema: + type: string + - name: targetURIs + in: query + required: false + schema: + type: array + items: + type: string + responses: + "200": + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/JobResponseDTO' + /models/{modelId}/recommendationsprogress: + get: + tags: + - model-controller + description: "Get an update on the progress of the recommendations operation,\ + \ given the ID of the model." 
+ operationId: getRecommendationsProgress + parameters: + - name: modelId + in: path + required: true + schema: + type: string + responses: + "200": + description: recommendations progress + content: + '*/*': + schema: + $ref: '#/components/schemas/Progress' + /models/{modelId}/recommendations/{jobId}/status: + get: + tags: + - model-controller + operationId: checkRecJobStatus + parameters: + - name: modelId + in: path + required: true + schema: + type: string + - name: jobId + in: path + required: true + schema: + type: string + responses: + "200": + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/JobResponseDTO' + /models/{modelId}/recommendations/{jobId}/result: + get: + tags: + - model-controller + operationId: downloadRecommendationsReport + parameters: + - name: modelId + in: path + required: true + schema: + type: string + - name: jobId + in: path + required: true + schema: + type: string + responses: + "200": + description: OK + content: + '*/*': + schema: + $ref: '#/components/schemas/RecommendationReportDTO' /models/{modelId}/info: get: tags: @@ -2736,7 +2876,7 @@ paths: get: tags: - json-error-controller - operationId: error_1 + operationId: error_6 responses: "200": description: OK @@ -2747,7 +2887,7 @@ paths: put: tags: - json-error-controller - operationId: error + operationId: error_1 responses: "200": description: OK @@ -2758,7 +2898,7 @@ paths: post: tags: - json-error-controller - operationId: error_3 + operationId: error_2 responses: "200": description: OK @@ -2769,7 +2909,7 @@ paths: delete: tags: - json-error-controller - operationId: error_4 + operationId: error_3 responses: "200": description: OK @@ -2791,7 +2931,7 @@ paths: head: tags: - json-error-controller - operationId: error_6 + operationId: error responses: "200": description: OK @@ -2802,7 +2942,7 @@ paths: patch: tags: - json-error-controller - operationId: error_2 + operationId: error_4 responses: "200": description: OK @@ -3077,6 +3217,8 @@ components: $ref: '#/components/schemas/Level' impactLevelAsserted: type: boolean + normalOpEffect: + type: boolean directCauses: uniqueItems: true type: array @@ -3349,6 +3491,8 @@ components: type: string secondaryThreat: type: boolean + normalOperation: + type: boolean secondaryEffectConditions: uniqueItems: true type: array @@ -3650,6 +3794,8 @@ components: $ref: '#/components/schemas/MisbehaviourSet' secondaryThreat: type: boolean + normalOperation: + type: boolean secondaryEffectConditions: type: object additionalProperties: @@ -4059,6 +4205,84 @@ components: $ref: '#/components/schemas/Graph' uriPrefix: type: string + JobResponseDTO: + type: object + properties: + jobId: + type: string + state: + type: string + message: + type: string + ConsequenceDTO: + type: object + properties: + asset: + $ref: '#/components/schemas/AssetDTO' + label: + type: string + description: + type: string + impact: + type: string + likelihood: + type: string + risk: + type: string + uri: + type: string + ControlDTO: + type: object + properties: + label: + type: string + description: + type: string + uri: + type: string + asset: + $ref: '#/components/schemas/AssetDTO' + action: + type: string + RecommendationDTO: + type: object + properties: + identifier: + type: integer + format: int32 + controlStrategies: + uniqueItems: true + type: array + items: + $ref: '#/components/schemas/ControlStrategyDTO' + controls: + uniqueItems: true + type: array + items: + $ref: '#/components/schemas/ControlDTO' + state: + $ref: '#/components/schemas/StateDTO' + 
RecommendationReportDTO: + type: object + properties: + current: + $ref: '#/components/schemas/StateDTO' + recommendations: + type: array + items: + $ref: '#/components/schemas/RecommendationDTO' + StateDTO: + type: object + properties: + risk: + type: object + additionalProperties: + type: integer + format: int32 + consequences: + type: array + items: + $ref: '#/components/schemas/ConsequenceDTO' ControlStrategyDB: type: object properties: diff --git a/docs/papers/README.md b/docs/papers/README.md index 390f41ca..a9e446b7 100644 --- a/docs/papers/README.md +++ b/docs/papers/README.md @@ -1,29 +1,37 @@ # Papers and articles -The [DOI system](https://en.wikipedia.org/wiki/Digital_object_identifier) is the standard for uniquely -identifying papers and their metadata. All Spyderisk papers have a DOI. - -In addition, this directory contains the PDFs of papers, articles and reports written by contributors to -Spyderisk. These PDFs are referred to throughout the Spyderisk tree, particularly in -the [Spyderisk history document](./../../HISTORY.md) and having them here -ensures the PDFs are always available given the many systems the originals are -stored in. This scheme also gives a single web address that can be crawled by -[archive.org](https://archive.org), rather than a different destination for every DOI. - -The authors own their own work and have contributed these articles under the CC-BY-SA license -as described in the top-level [licenses/](../../licences/README.md) directory. - -* [surridge2011.pdf](./surridge2011.pdf): M. Surridge, A. Chakravarthy, M. Bashevoy, J. Wright, M. Hall-May, and R. Nossal, ‘SERSCIS-Ont : evaluation of a formal metric model using airport collaborative decision making’, Int. J. Adv. Intell. Syst., vol. 4, no. 3, Art. no. 3, Apr. 2012. doi: 10.5258/SOTON/P1129 - -* [surridge2013.pdf](./surridge2013.pdf): M. Surridge, B. Nasser, X. Chen, A. Chakravarthy, and P. Melas, ‘Run-Time Risk Management in Adaptive ICT Systems’, in 2013 International Conference on Availability, Reliability and Security, Sep. 2013, pp. 102–110. doi: 10.1109/ARES.2013.20. - -* [surridge2018.pdf](./surridge2018.pdf): M. Surridge et al., ‘Trust Modelling in 5G mobile networks’, in Proceedings of the 2018 Workshop on Security in Softwarized Networks: Prospects and Challenges, in SecSoN ’18. New York, NY, USA: Association for Computing Machinery, Aug. 2018, pp. 14–19. doi: 10.1145/3229616.3229621. - -* [surridge2019.pdf](./surridge2019.pdf): M. Surridge et al., ‘Modelling Compliance Threats and Security Analysis of Cross Border Health Data Exchange’, in New Trends in Model and Data Engineering, C. Attiogbé, F. Ferrarotti, and S. Maabout, Eds., in Communications in Computer and Information Science. Cham: Springer International Publishing, 2019, pp. 180–189. doi: 10.1007/978-3-030-32213-7_14. - -* [pickering2021.pdf](./pickering2021.pdf): B. Pickering, C. Boletsis, R. Halvorsrud, S. Phillips, and M. Surridge, ‘It’s Not My Problem: How Healthcare Models Relate to SME Cybersecurity Awareness’, in HCI for Cybersecurity, Privacy and Trust, A. Moallem, Ed., in Lecture Notes in Computer Science. Cham: Springer International Publishing, 2021, pp. 337–352. doi: 10.1007/978-3-030-77392-2_22. - -* [boletsis2022.pdf](./boletsis2022.pdf): C. Boletsis, R. Halvorsrud, J. Pickering, S. Phillips, and M. 
Surridge, ‘Cybersecurity for SMEs: Introducing the Human Element into Socio-technical Cybersecurity Risk Assessment’, presented at the 12th International Conference on Information Visualization Theory and Applications, Jul. 2022, pp. 266–274. Accessed: Jul. 05, 2022. [Online]. - -* [taylor2022.pdf](./taylor2022.pdf): S. Taylor, P. Melas, K. Meacham, M. Surridge, S. Senior, S. Phillips, 'Dynamic Risk Management Tools: Final -Prototypes and Technical Report', published by the [FogProtect Project](https://fogprotect.eu/). +This directory contains informal copies of academic papers and articles by +contributors to the Spyderisk project and related risk assessment topics. There +are also some other written academic project deliverables such as project +reports. These PDFs are referred to throughout the System Modeller tree, +particularly in the [Spyderisk history document](./../../HISTORY.md), so having +them locally means links always work. + +## Attribution and copyright + +Each of these papers have several authors, but only one appears in the filename +which is a degree of informal attribution. We have made efforts to contact +authors where relevant. The authors of papers own their own work and where +possible have additionally contributed these articles under the CC-BY-SA +license as described in the top-level Spyderisk [LICENSES/](../../LICENSES/README.md) directory. + +| Local copy | Full citation | +| ------------- | ------------- | +| [boniface2010.pdf](./boniface2010.pdf) | S. Bertram, M. Boniface, M. Surridge, N. Briscombe, M. Hall-May, ‘On-Demand Dynamic Security for Risk-Based Secure Collaboration in Clouds’, 2010 IEEE 3rd International Conference on Cloud Computing. doi: [10.1109/CLOUD.2010.83](https://doi.org/10.1109/CLOUD.2010.83) | +| [surridge2010.pdf](./surridge2010.pdf) | M. Bashevoy, A. Chakravarthy, M. Hall-May, M. Surridge, ‘SERSCIS-Ont: A Formal Metrics Model for Adaptive Service Oriented Frameworks’, ADAPTIVE 2010 : The Second International Conference on Adaptive and Self-Adaptive Systems and Applications, Lisbon. doi: **PENDING** | +| [surridge2011-1.pdf](./surridge2011-1.pdf) | M. Hall-May, R. Nossal-Tuyeni, M. Surridge, ‘Resilient critical infrastructure management using service oriented architecture - a test case using airport collaborative decision making’, International Journal of Applied Mathematics and Computer Science. doi: [10.2478/v10006-011-0019-9](https://doi.org/10.2478/v10006-011-0019-9) | +| [surridge2011.pdf](./surridge2011.pdf) | M. Surridge, A. Chakravarthy, M. Bashevoy, J. Wright, M. Hall-May, and R. Nossal, ‘SERSCIS-Ont : evaluation of a formal metric model using airport collaborative decision making’, Int. J. Adv. Intell. Syst., vol. 4, no. 3, Art. no. 3, Apr. 2012. doi: [10.5258/SOTON/P1129](https://doi.org/10.5258/SOTON/P1129) | +| [chakravarthy2012.pdf](./chakravarthy2012.pdf)| M. Surridge, A. Chakravarthy, M. Hall-May, X. Chen, B. Nasse, R. Nossal, ‘SERSCIS: Semantic modelling of dynamic, multi-stakeholder systems’, SIDs 2012 - Proceedings of the SESAR Innovation Days. doi: **PENDING** | +|[surridge2013.pdf](./surridge2013.pdf) | M. Surridge, B. Nasser, X. Chen, A. Chakravarthy, and P. Melas, ‘Run-Time Risk Management in Adaptive ICT Systems’, in 2013 International Conference on Availability, Reliability and Security, Sep. 2013, pp. 102–110. doi: [10.1109/ARES.2013.20](http://doi.org/10.1109/ARES.2013.20) | +|[surridge2018.pdf](./surridge2018.pdf) | M. 
Surridge et al., ‘Trust Modelling in 5G mobile networks’, in Proceedings of the 2018 Workshop on Security in Softwarized Networks: Prospects and Challenges, in SecSoN ’18. New York, NY, USA: Association for Computing Machinery, Aug. 2018, pp. 14–19. doi: [10.1145/3229616.3229621](https://doi.org/10.1145/3229616.3229621) | +|[surridge2019.pdf](./surridge2019.pdf) | M. Surridge et al., ‘Modelling Compliance Threats and Security Analysis of Cross Border Health Data Exchange’, in New Trends in Model and Data Engineering, C. Attiogbé, F. Ferrarotti, and S. Maabout, Eds., in Communications in Computer and Information Science. Cham: Springer International Publishing, 2019, pp. 180–189. doi: [10.1007/978-3-030-32213-7_14](https://doi.org/10.1007/978-3-030-32213-7_14) | +|[surridge2019-2.pdf](./surridge2019-2.pdf) | L. Goeke, K. Meacham, N. G. Mohammadi, M. Surridge, S. Wiegand, T. Wilkinson, ‘RestAssured Security and Privacy Engineering’, External Report. doi: **PENDING** | +|[surridge2020.pdf](./surridge2020.pdf) | D. Ayed, E. Jaho, C. Lachner, Z. Mann, R. Seidl, M. Surridge, ‘FogProtect: Protecting Sensitive Data in the Computing Continuum’, Advances in Service-Oriented and Cloud Computing - International Workshops of ESOCC 2020. doi: [10.1007/978-3-030-71906-7_17](https://doi.org/10.1007/978-3-030-71906-7_17). | +|[pickering2021.pdf](./pickering2021.pdf) | B. Pickering, C. Boletsis, R. Halvorsrud, S. Phillips, and M. Surridge, ‘It’s Not My Problem: How Healthcare Models Relate to SME Cybersecurity Awareness’, in HCI for Cybersecurity, Privacy and Trust, A. Moallem, Ed., in Lecture Notes in Computer Science. Cham: Springer International Publishing, 2021, pp. 337–352. doi: [10.1007/978-3-030-77392-2_22](https://doi.org/10.1007/978-3-030-77392-2_22)| +|[phillips2021.pdf](./phillips2021.pdf) | L. Carrascal, K. Meacham, P. Melas, F. Rendon, M. Surridge, S. Taylor, S. Senior, ‘Data protection toolkit reducing risks in hospitals and care centers: Release of risk assessment tools.’, doi: [10.3030/826284](https://doi.org/10.3030/826284) - Results tab / Deliverables / Other. +|[boletsis2022.pdf](./boletsis2022.pdf) | C. Boletsis, R. Halvorsrud, J. Pickering, S. Phillips, and M. Surridge, ‘Cybersecurity for SMEs: Introducing the Human Element into Socio-technical Cybersecurity Risk Assessment’, presented at the 12th International Conference on Information Visualization Theory and Applications, Jul. 2022, pp. 266–274. doi: [10.1007/978-3-030-77392-2_22](https://doi.org/10.1007/978-3-030-77392-2_22)| +|[taylor2022.pdf](./taylor2022.pdf) | N. Goetze, K. Meacham, P. Melas, S. Phillips, S. Senior, M. Surridge, S. Taylor, ‘Dynamic Risk Management Tools: Final Prototypes and Technical Report’, [FogProtect Project](https://fogprotect.eu/). doi: [10.3030/871525](https://doi.org/10.3030/871525) - Results tab / Deliverables / Documents and reports. | +|[smart2023.pdf](./smart2023.pdf) | J. Aslam, M. Boniface, Jarwar Aslam, P. Smart, J. Watson, ‘Secure Ontologies for Internet of Things Systems (SOfIoTS)’, report and recommendations for [PETRAS National Centre of Excellence in IoT Systems Cybersecurity](https://petras-iot.org/). doi: **PENDING** | +|[smart2024.pdf](./smart2024.pdf) | M. Boniface, N. Fair, P. Smart, ‘Biomedical Burden Ontology: Ontology Documentation’, report for NHS and partners, [IT Innovation Centre, University of Southampton, Southampton, UK](https://www.it-innovation.soton.ac.uk/). doi: **PENDING** | +|[smart2024-2.pdf](./smart2024-2.pdf) | R. Clowes, R. Heersmink, P. 
Smart, ‘The Ethics of the Extended Mind: Mental Privacy, Manipulation and Agency’, doi:[10.1007/978-3-662-68362-0_2](https://doi.org/10.1007/978-3-662-68362-0_2) +|[phillips2024.pdf](./phillips2024.pdf) | M. Boniface, S. Modafferi, S. C. Phillips, M. Surridge, S. Taylor, ‘Automated Knowledge-Based Cybersecurity Risk Assessment of Cyber-Physical Systems’, IEEE Access. doi: **PENDING** | diff --git a/docs/papers/boniface2010.pdf b/docs/papers/boniface2010.pdf new file mode 100644 index 00000000..03c9903b Binary files /dev/null and b/docs/papers/boniface2010.pdf differ diff --git a/docs/papers/chakravarthy2012.pdf b/docs/papers/chakravarthy2012.pdf new file mode 100644 index 00000000..c01e4364 Binary files /dev/null and b/docs/papers/chakravarthy2012.pdf differ diff --git a/docs/papers/phillips2021.pdf b/docs/papers/phillips2021.pdf new file mode 100644 index 00000000..acd7dc45 Binary files /dev/null and b/docs/papers/phillips2021.pdf differ diff --git a/docs/papers/phillips2024.pdf b/docs/papers/phillips2024.pdf new file mode 100644 index 00000000..d25df7bb Binary files /dev/null and b/docs/papers/phillips2024.pdf differ diff --git a/docs/papers/smart2023.pdf b/docs/papers/smart2023.pdf new file mode 100644 index 00000000..ea5c980f Binary files /dev/null and b/docs/papers/smart2023.pdf differ diff --git a/docs/papers/smart2024-2.pdf b/docs/papers/smart2024-2.pdf new file mode 100644 index 00000000..5ae52c46 Binary files /dev/null and b/docs/papers/smart2024-2.pdf differ diff --git a/docs/papers/smart2024.pdf b/docs/papers/smart2024.pdf new file mode 100644 index 00000000..623fa6dc Binary files /dev/null and b/docs/papers/smart2024.pdf differ diff --git a/docs/papers/surridge2010.pdf b/docs/papers/surridge2010.pdf new file mode 100644 index 00000000..870e227f Binary files /dev/null and b/docs/papers/surridge2010.pdf differ diff --git a/docs/papers/surridge2011-1.pdf b/docs/papers/surridge2011-1.pdf new file mode 100644 index 00000000..3c3b2b30 Binary files /dev/null and b/docs/papers/surridge2011-1.pdf differ diff --git a/docs/papers/surridge2019-2.pdf b/docs/papers/surridge2019-2.pdf new file mode 100644 index 00000000..4b628915 Binary files /dev/null and b/docs/papers/surridge2019-2.pdf differ diff --git a/docs/papers/surridge2020.pdf b/docs/papers/surridge2020.pdf new file mode 100644 index 00000000..d3d87b74 Binary files /dev/null and b/docs/papers/surridge2020.pdf differ diff --git a/docs/triple-store-schema.md b/docs/triple-store-schema.md new file mode 100644 index 00000000..08d6135e --- /dev/null +++ b/docs/triple-store-schema.md @@ -0,0 +1,491 @@ +Describes version FIXME of the [Spyderisk core model](https://github.com/Spyderisk/system-modeller/blob/dev/src/main/resources/core.rdf). + +This document is not strictly limited to the core, because it also covers some things needed for system modelling. + +Includes notes on versions FIXME-FIXME of the core model + +# Introduction + +This document aims to describe how we represent various aspects of the +models in the Triplestore. It originates from personal notes by Mike Surridge. + +This document is very much incomplete and a work in progress. + +# Notation + +The diagrams directly show the models as represented in the Triplestore. +A triple (subject, predicate, object) is shown on the diagram as two +things linked by an arrow. 
The things and the arrows are labelled with +abbreviated forms of the URIs: + +- core#something →\ + http://it-innovation.soton.ac.uk/ontologies/trustworthiness/core#something + +- domain#something →\ + http://it-innovation.soton.ac.uk/ontologies/trustworthiness/domain#something + +- system#something →\ + http://it-innovation.soton.ac.uk/ontologies/trustworthiness/system#something + +- rdf#type →\ + http://www.w3.org/1999/02/22-rdf-syntax-ns#type + +- rdf#label →\ + http://www.w3.org/2000/01/rdf-schema#label + +- rdf#comment →\ + http://www.w3.org/2000/01/rdf-schema#comment + +- \ → a literal Boolean, e.g. + \"true\"\^\^http://www.w3.org/2001/XMLSchema#boolean + +- \ → a literal Integer, e.g. + \"4853\"\^\^\ + +- \ → a literal String + +The fragment identifiers of the URIs are (in the system model) sometimes +constructed from various strings. For instance, a trustworthiness +attribute set URI may be "system#TWAS-Authenticity-2aaa4816" where +"Authenticity" is the trustworthiness attribute and "2aaa4816" is the ID +of the asset object. To show how different URIs relate, these URIs are +written as templates with variable interpolation, such as +"system#TWAS-{TWA}-{AssetID}", meaning replace "{TWA}" with the actual +trustworthiness attribute name and replace "{AssetID}" with the actual +asset ID. + +Triples in the inferred graph are shown with blue arrows and those in +the system graph are shown with black arrows. + +The different elements are coloured according to: + +- System URI: green + +- Core URI: orange + +- Domain URI: pink + +White boxes/ovals are asserted (and should also have black arrows). + +Need to indicate what is added by validator and what is added by risk +calculation. + +Rectangles are entities, and ovals are literals/attributes. + +# System Model + +## Asserted Asset + +![Asserted Asset](./images/corerdf-asserted-asset.png "Asserted Asset") + +When the user adds an Asset to the canvas, they are asserting the +existence and type. They may choose to change the default population +size (singleton) and may choose to change the default label. The Java +service adds the AssetID and this also goes in the asserted graph. + +- Asset: the asset URI fragment (a Java object hash for asserted + assets or something constructed including AssetIDs for inferred + assets) + +- AssetID: the ID of an asset (8 hex digits) + +- AssetType: the asset type (e.g. "Data") + +- PopulationSize: e.g. "PopLevelSingleton" + +## Inferred Asset + +![Inferred Asset](./images/corerdf-inferred-asset.png "Inferred Asset") + +An inferred Asset has the same triples but in the inferred graph. + +## Asset Relations / Cardinality Constraint + +- rdf#type: core#CardinalityConstraint + +- core#linkType: domain#{relationID} + +- core#linksFrom: system#{AssetID} + +- core#linksTo: system#{AssetID} + +- core#sourceCardinality: Integer + +- core#targetCardinality: Integer + +## Trustworthiness Attribute Sets (TWAS) + +![Trustworthiness Attribute Sets](./images/corerdf-trustworthiness-attribute-sets.png "Trustworthiness Attribute Sets") + +A TWAS links a Trustworthiness Attribute to a specific Asset: + +- TWA: a specific trustworthiness attribute (e.g. "Authenticity") + +- Asset: the asset + +The TWAS is generated by the validation. + +In addition: + +- TWLevel: e.g. 
"TrustworthinessLevelVeryLow" with an inferred and + asserted value + +- core#causesThreat: domain#{ThreatID} -- linking to a Primary Threat + caused by a MS which degrades the TWAS + +- core#isExternalCause: either "true" or the triple doesn't exist + +- The TWAS label, comment and visibility can be found in the domain + model + +## Misbehaviour Sets (MS) + +![Misbehaviour Sets](./images/corerdf-misbehaviour-sets.png "Misbehaviour Sets") + +Add core#causedBy system#{ThreatID} (0 to many) + +Add core#causesThreat: domain#{ThreatID} -- linking to Secondary Threat +caused directly by the MS or a Primary Threat caused by a TWAS that is +degraded by the MS (via TWIS). i.e. the likely cause. + +Add core#isExternalCause (true, or missing): indicating MS whose +likelihood is determined only by external TWL assertions and not +increased by any threats. + +A Misbehaviour Set links a Misbehaviour (consequence) with an Asset: + +- M: a specific misbehaviour (e.g. "LossOfSomething") + +- Asset: the asset + +Also: + +- ImpactLevel: e.g. "ImpactLevelMedium", with both inferred and + asserted values + +- LikelihoodLevel: e.g. "LikelihoodLevelHigh" which uses (the legacy) + "core#hasPrior" as the predicate + +- RiskLevel: e.g. "RiskLevelVeryLow" + +- The misbehaviour label, comment and visibility can be found in the + domain model + +## Control Sets (CS) + +![Control Sets](./images/corerdf-control-sets.png "Control Sets") + +A Control Set links together a Control and an Asset: + +- C: a specific control (e.g. "PhysicalLock") + +- Asset: the asset + +Also: + +- The core#isProposed is present in the asserted graph if the green + traffic light is on. + +- The core#isWorkInProgress triple is only present if the amber light + is on. In this case, the core#isProposed triple is also put into the + asserted graph. + +- The core#isProposed and core#isWorkInProgress in the inferred graph + should probably not be there. + +- core#hasCoverageLevel has inferred and asserted levels + + - what about default? Why is there an inferred level? + + - The coverage refers to the likelihood of the CS being + found/effective when sampling the system (sampling both the + population of assets if there is more than 1 and sampling over + time). + +- The Control label, comment and visibility can be found in the domain + model. + +- The intention is to add a property indicating that a Control Set can + be used at runtime (see + [#74](https://github.com/Spyderisk/system-modeller/issues/74)) + +## Control Strategy (CSG) + +![Control Strategy](./images/corerdf-control-strategy.png "Control Strategy") + +A *system model* Control Strategy links multiple Control Sets: + +- core#hasMandatoryCS: Control Sets that must be enabled for the CSG + to be enabled + +- core#hasOptionalCS: what are these for? Example? + +Also the *system model* CSG has: + +- core#hasCoverageLevel: + +- rdf#comment: the description of the CSG, incorporating actual asset + labels into the template found on the domain model CSG + +- core#mitigates: old predicate meaning that it is only applicable in + future risk calculations + +- core#blocks: the specific Threats that this CSG will reduce the + likelihood of (if all its mandatory CS are active ("inProgress")) + +The URI of the CSG may also have "-Implementation" and/or "-Runtime" +appended as a kludge. Meaning: + +- the CSG can be enabled at runtime, if and only if it represents + activation of a contingency plan, and some dependency is satisfied. + For a CSG of this type, the URI ends \'-Implementation\'. 
+ +- the CSG can be enabled at runtime, if and only if it represents + activation of a contingency plan, with no other dependency. For a + CSG of this type, the URI ends \'-Implementation-Runtime\'. + +- the CSG can be enabled at runtime, and doesn\'t need to be a + pre-planned contingency measure. For a CSG of this type, the URI + ends \'-Runtime\' (without the \'-Implementation\' string). + +- the CSG cannot be enabled at runtime. A CSG of this type has a URI + that does not include the strings \'-Implementation\' or + \'-Runtime\'. + +What CSGs can be used for design time? + +The *domain model* CSG has: + +- core#isFutureRisk: set to \'false\' if the CSG should be ignored in + future risk calculations + +- core#isCurrentRisk: set to \'false\' if the CSG should be ignored in + current risk calculations + +- core#hasBlockingEffect: indicating the "effectiveness" of the CSG. + Generally set to "TrustworthinessLevelSafe" unless there is a + temporal factor that means that the CSG will not always work (for + instance in SoftwarePatching there is a delay between the patch + being needed and being applied). The effectiveness also used to be + used to distinguish e.g. good and bad passwords but this is no + longer done and different threats are used instead. + +## Trustworthiness Impact Set (TWIS) + +![Trustworthiness Impact Set](./images/corerdf-trustworthiness-impact-set.png "Trustworthiness Impact Set") + +A Trustworthiness Impact Set links a TWAS to a MS: + +- M1: a misbehaviour (e.g. "LossOfAuthenticity") + +- TWA: a trustworthiness attribute (e.g. "Authenticity") + +- Asset1: the URI fragment for an asset (can be a Java object hash or + something constructed) + +- AssetID1: the ID of Asset1 (8 hex digits) + +All links from the TWIS are shown, other than that just the link to the +shared asset is shown to reinforce the fact the linked TWAS and MS are +at the same Asset. + +## Primary Threat + +A Threat in the system model is added based on a Matching Pattern being +found in the system model. This includes TWAS, MS and CSG. They key +elements (before risk calculation) are: + +- rdf#label, rdf#comment, rdf#type + +- core#appliesTo: system#{MatchingPatternID} + + - Link to the Matching Pattern which generated the Threat + +- core#blockedBy: system#CSG (also core#blocks in the other direction) + (0 to many) + + - Control Strategies which are present in the system model which + may reduce the likelihood of the Threat + + - Why both? 
+ +- core#causesMisbehaviour: system#{MSID} + + - Misbehaviours which are directly "caused" by the Threat + (disregarding any likelihood calculation) + + - If we want to know the Assets "threatened" by the Threat then + the locations of these Misbehaviours should be used + +- core#hasEntryPoint: system#{TWASID} + + - The TWAS from which may directly cause the Threat + +- core#hasFrequency: domain#{LikelihoodLevel} + + - Used to be used to take account of factors such as attack + complexity or resource requirements + + - Now used purely for temporal + +- core#isNormalOp: Boolean (either true, or the triple doesn't exist) + + - A "Normal Operation" is one that is expected, rather than part + of an attack (Threats that are not a normal-op Threat may be + called "adverse" Threats, but there is no predicate for them) + + - This is defined for the Threat in the domain model + +- core#parent: domain#{ThreatID} + + - Links to the Threat in the domain model + +- core#threatens: system#{AssetID} -- deprecated as it can only refer + to one asset + +- core#triggers: system#{ThreatID} (also core#triggeredBy in the other + direction) (0 to many) + + - Control Strategies which must be active for the Threat's + likelihood to be anything more than the minimum + +## Secondary Threat + +Key secondary threat elements (before risk calculation) are: + +- rdf#label, rdf#comment, rdf#type + +- core#appliesTo: system#{MatchingPatternID} + +- core#blockedBy: system#CSG (also core#blocks in the other + direction)??? + +- core#causesMisbehaviour: system#{MSID} -- can be multiple? And + therefore directly affecting multiple assets + +- core#hasFrequency: domain#{LikelihoodLevel} + +- core#hasSecondaryEffectCondition: domain#{MSID} -- 1 to many + +- core#isSecondaryThreat: Boolean (either true, or the triple doesn't + exist) + + - We used to identify secondary threats by looking to see if they + had secondary effect conditions but sometimes these now appear + in "mixed cause" threats, so this predicate was added to + identify pure secondary threats. + +- core#parent: domain#{ThreatID} + +- core#threatens: system#{AssetID} -- deprecated as it can only refer + to one asset + +## Threat Graph + +The MS, TWIS, TWAS, Primary and Secondary Threats link together as +follows, showing how M1 on Asset1 can "cause" M2 on Asset2 either via a +\[TWIS, a TWAS and a Primary Threat\] or via a Secondary Threat. + +![Threat Graph](./images/corerdf-threat-graph.png "Threat Graph") + +The risk calculation adds the following to the system model Threats: + +- core#hasPrior: domain#{LikelihoodLevel} + + - Likelihood of the Threat + +- core#hasRisk: domain#{RiskLevel} + + - System risk level of the Threat, set to the highest risk level + of any Misbehaviour it (in)directly causes + +The attack graph calculation works out the "causal" parts of the larger +graph, linking Threats and Misbehaviours based on the likelihoods +calculated by the risk calculation. 
It adds these triples: + +- core#causedBy: system#{TWASID} + + - Links the Threat to the TWAS which cause it + + - This is a subset of the TWAS found through the + core#hasEntryPoint predicate + +- core#causesDirectMisbehaviour: system#{MSID} + + - Can be read "directly causes misbehaviour" + + - Links the Threat to MS which the Threat directly causes + + - This is a subset of the MS found through the + core#causesMisbehaviour predicate + +- core#causesIndirectMisbehaviour: system#{MSID} + + - Can be read "indirectly causes misbehaviour" + + - Links the Threat to Misbehaviours whose direct cause is on an + attack path involving the Threat + + - Only added to Threats which are at the start of a threat path + (Root Causes and Initial Causes) + +- core#causesIndirectThreat: system#{ThreatID} + + - Can be read "indirectly causes threat" + + - Links the Threat to Threats whose direct cause is on an attack + path involving the Threat + + - Only added to Threats which are at the start of a threat path + (Root Causes and Initial Causes) + +- core#isRootCause: Boolean (either true, or the triple doesn't exist) + + - A "Root Cause" is a threat whose likelihood depends only on + \'normal operation\' Threats + + - Where the likelihood based on the inferred trustworthiness + values is equal or lower than that calculated from the asserted + trustworthiness values + + - Found at the start of Attack Graphs + +- core#isInitialCause: Boolean (either true, or the triple doesn't + exist) + + - An "Initial Cause" is a Threat whose likelihood does not depend + on any other + + - Initial Causes are found at the start of Threat Paths + +The following Figure shows a graph of (Primary) Threats, MS, TWIS, TWAS +with the causal path highlighted with bold boxes and the additional +relations added with dashed lines: + +- No Levels or Assets are shown. + +- Threat1 and Threat2 are Root Causes. + +- Threat3 has two entry points, but in this example, we are taking + TWAS1 to be sufficient but not necessary and with a higher inferred + trustworthiness value than TWAS2). TWAS2 is therefore calculated to + be the "cause" necessary to explain the likelihood of the threat + (hence core#causesThreat is added). + +- MS3 has two Threats which can cause it, but (given the likelihoods + of Threat3 and Threat4) Threat3 is calculated to be the "cause". + +- Threat3 is a Primary Threat and the MS preceding it in the graph is + MS2 which is linked directly to it with the "core#causesThreat" + predicate. + +- Threat4 is a Secondary Threat. It has one secondary effect condition + which is therefore also the one which "causes" it (hence + "core#causesThreat"). + +![Threat Graph Causality](./images/corerdf-threat-graph-causality.png "Threat Graph Causality") + +When looking at the highest likelihood threat graph to cause a MS, we +can just navigate using the core#causesDirectMisbehaviour and +core#causesThreat predicates: jumping between Threats and MS (regardless +of whether they are Primary, Secondary or mixed cause Threats). 
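As a rough illustration of this navigation, if you have Apache Jena's
command-line tools available you could query the TDB store directly with
something like the following. This is a sketch only: the `tdbquery` invocation
and the `./jena-tdb` location are assumptions, and the query simply lists
direct Threat-to-Misbehaviour links in whichever named graph they appear:

```shell
# Sketch: list Threats and the Misbehaviour Sets they directly cause
tdbquery --loc=./jena-tdb '
PREFIX core: <http://it-innovation.soton.ac.uk/ontologies/trustworthiness/core#>
SELECT ?g ?threat ?ms
WHERE { GRAPH ?g { ?threat core:causesDirectMisbehaviour ?ms } }'
```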
diff --git a/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskLevelCount.java b/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskLevelCount.java index 56052540..81c20a83 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskLevelCount.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskLevelCount.java @@ -24,14 +24,15 @@ ///////////////////////////////////////////////////////////////////////// package uk.ac.soton.itinnovation.security.model.system; +import java.util.Objects; import uk.ac.soton.itinnovation.security.model.Level; -public class RiskLevelCount { - +public class RiskLevelCount implements Comparable { + private Level level; - + private int count; - + public RiskLevelCount() { } @@ -50,6 +51,30 @@ public int getCount() { public void setCount(int count) { this.count = count; } - - + + @Override + public int compareTo(RiskLevelCount other) { + // Compare levels first + int levelComparison = this.level.compareTo(other.level); + if (levelComparison != 0) { + return levelComparison; + } + + // If levels are equal, compare counts + return Integer.compare(this.count, other.count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RiskLevelCount that = (RiskLevelCount) obj; + return count == that.count && Objects.equals(level, that.level); + } + + @Override + public int hashCode() { + return Objects.hash(level, count); + } + } diff --git a/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskVector.java b/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskVector.java index d0dcf33f..79829810 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskVector.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/model/system/RiskVector.java @@ -25,55 +25,127 @@ package uk.ac.soton.itinnovation.security.model.system; import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.ArrayList; import java.util.HashMap; import java.util.Map; +import java.util.Objects; + import uk.ac.soton.itinnovation.security.model.Level; -public class RiskVector { - - private Map riskVector; - - public RiskVector(Collection riskLevels, Map riskLevelCounts) { - riskVector = new HashMap<>(); - - //For each defined risk level, get the count of misbehaviours at this level - for (Level riskLevel : riskLevels) { - RiskLevelCount riskLevelCount = new RiskLevelCount(); - riskLevelCount.setLevel(riskLevel); - Integer count = riskLevelCounts.get(riskLevel.getUri()); - riskLevelCount.setCount(count); - riskVector.put(riskLevel.getUri(), riskLevelCount); - } - } - - public Map getRiskVector() { - return riskVector; - } - - public void setRiskVector(Map riskVector) { - this.riskVector = riskVector; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - - sb.append("("); - - Collection riskLevelCounts = riskVector.values(); - - for (RiskLevelCount riskLevelCount : riskLevelCounts) { - sb.append(riskLevelCount.getLevel().getLabel()); - sb.append(": "); - sb.append(riskLevelCount.getCount()); - sb.append(", "); - } - - sb.setLength(sb.length() -2); //remove last comma - - sb.append(")"); - - return sb.toString(); - } +public class RiskVector implements Comparable { + + private Map riskV; + private Map levelValueMap; // aux map for comparison + + public RiskVector(Collection riskLevels, Map riskLevelCounts) { + this.riskV = 
new HashMap<>(); + this.levelValueMap = new HashMap<>(); + + //For each defined risk level, get the count of misbehaviours at this level + for (Level riskLevel : riskLevels) { + RiskLevelCount riskLevelCount = new RiskLevelCount(); + riskLevelCount.setLevel(riskLevel); + Integer count = riskLevelCounts.get(riskLevel.getUri()); + riskLevelCount.setCount(count); + riskV.put(riskLevel.getUri(), riskLevelCount); + levelValueMap.put(riskLevel.getValue(), riskLevel.getUri()); + } + } + + public Map getRiskVector() { + return riskV; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("("); + + // put the items from riskLevelCounts in a list + List riskLevelCounts = new ArrayList<>(riskV.values()); + + // sort the riskLevelCounts entries by the RiskLevelCount object's default sort + Collections.sort(riskLevelCounts); + + for (RiskLevelCount riskLevelCount : riskLevelCounts) { + sb.append(riskLevelCount.getLevel().getLabel()); + sb.append(": "); + sb.append(riskLevelCount.getCount()); + sb.append(", "); + } + + sb.setLength(sb.length() - 2); //remove last comma + + sb.append(")"); + + return sb.toString(); + } + + public String getOverall() { + int overall = 0; + String uri = ""; + for (Map.Entry entry : riskV.entrySet()) { + String riskLevelUri = entry.getValue().getLevel().getUri(); + int riskLevelValue = entry.getValue().getLevel().getValue(); + int riskCount = entry.getValue().getCount(); + if (riskCount > 0 && riskLevelValue >= overall) { + overall = riskLevelValue; + uri = riskLevelUri; + } + } + return uri; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + RiskVector other = (RiskVector) obj; + return Objects.equals(riskV, other.riskV); + } + + @Override + public int hashCode() { + return Objects.hashCode(riskV); + } + + @Override + public int compareTo(RiskVector other) { + + List sortedKeys = new ArrayList<>(levelValueMap.keySet()); + Collections.sort(sortedKeys, Collections.reverseOrder()); + + // iterate based on the sorted keys + for (Integer key : sortedKeys) { + String riskLevelUri = levelValueMap.get(key); + RiskLevelCount thisRiskLevelCount = riskV.get(riskLevelUri); + RiskLevelCount otherRiskLevelCount = other.riskV.get(riskLevelUri); + + if (thisRiskLevelCount == null && otherRiskLevelCount == null) { + continue; // Both are missing + } + if (thisRiskLevelCount == null) { + return -1; // This object is considered "less" + } + if (otherRiskLevelCount == null) { + return 1; // This object is considered "greater" + } + + // Compare RiskLevelCount objects + int result = thisRiskLevelCount.compareTo(otherRiskLevelCount); + if (result != 0) { + return result; + } + } + // If all compared RiskLevelCount objects are equal, consider the RiskVectors equal + return 0; + } } diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/IQuerierDB.java b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/IQuerierDB.java index 11ffabeb..8eee164b 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/IQuerierDB.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/IQuerierDB.java @@ -217,8 +217,11 @@ public interface IQuerierDB { */ void repairAssertedAssetPopulations(); void repairCardinalityConstraints(); + boolean updateAssertedLevel(LevelDB level, String twasURI, String model); boolean updateAssertedLevel(LevelDB level, TrustworthinessAttributeSetDB twas, String 
model); + boolean updateCoverageLevel(LevelDB level, String csURI, String model); boolean updateCoverageLevel(LevelDB level, ControlSetDB cs, String model); + boolean updateProposedStatus(Boolean status, String csURI, String model); boolean updateProposedStatus(Boolean status, ControlSetDB cs, String model); /** diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/JenaQuerierDB.java b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/JenaQuerierDB.java index 7dac93c7..3309673a 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/JenaQuerierDB.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/JenaQuerierDB.java @@ -158,6 +158,8 @@ public void initForRiskCalculation(){ } public void init(){ + logger.info("Initialising JenaQuerierDB"); + final long startTime = System.currentTimeMillis(); this.prefixMap = dataset.getNamedModel(stack.getGraph("core")).getNsPrefixMap(); @@ -3390,13 +3392,13 @@ private void fixCardinalityConstraintURI(Map cc } - /* Method to override the assumed TW level of a TWAS in a graph without creating the TWAS + /* Methods to override the assumed TW level of a TWAS in a graph without creating the TWAS * in the same graph. Needed to adjust user/client asserted levels which appear as single * triples in the asserted graph, but without the TWAS entity (which is added later by the * validator in the inferred graph). */ @Override - public boolean updateAssertedLevel(LevelDB level, TrustworthinessAttributeSetDB twas, String model){ + public boolean updateAssertedLevel(LevelDB level, String twasURI, String model){ String graphUri = stack.getGraph(model); if (graphUri == null) { return false; @@ -3404,27 +3406,54 @@ public boolean updateAssertedLevel(LevelDB level, TrustworthinessAttributeSetDB Model datasetModel = dataset.getNamedModel(graphUri); // Encode the population level as a single property of the asset resource - Resource resource = datasetModel.getResource(getLongName(twas.getUri())); + Resource resource = datasetModel.getResource(getLongName(twasURI)); Property property = ResourceFactory.createProperty(getLongName("core#hasAssertedLevel")); RDFNode object = ResourceFactory.createResource(getLongName(level.getUri())); // Now remove the old value and save the new value - dataset.begin(ReadWrite.WRITE); - resource.removeAll(property); - resource.addProperty(property, object); - dataset.commit(); - dataset.end(); + try { + dataset.begin(ReadWrite.WRITE); + resource.removeAll(property); + resource.addProperty(property, object); + dataset.commit(); + } + catch (Exception e) { + // Abort the changes and signal that there has been an error + dataset.abort(); + String message = String.format("Error occurred while updating assumed TW level for TWAS %s", twasURI); + logger.error(message, e); + throw new RuntimeException(message, e); + } + finally { + dataset.end(); + } + + if(cacheEnabled){ + // Make the same change in the cached object, if it exists + TrustworthinessAttributeSetDB twas = this.getTrustworthinessAttributeSet(twasURI, model); + if(twas != null) { + twas.setAssertedLevel(level.getUri()); + this.store(twas, model); + } + + // Note that the calling process must change ControlSetDB objects for other graphs } + } return true; } - /* Method to override the coverage level of a CS in a graph without creating the CS in the + @Override + public boolean updateAssertedLevel(LevelDB level, TrustworthinessAttributeSetDB twas, String model){ + return updateAssertedLevel(level, twas.getUri(), model); + } + + 
/* Methods to override the coverage level of a CS in a graph without creating the CS in the * same graph. Needed to adjust user/client supplied coverage levels which appear as single * triples in the asserted graph, but without the CS entity (which is added later by the * validator in the inferred graph). */ @Override - public boolean updateCoverageLevel(LevelDB level, ControlSetDB cs, String model){ + public boolean updateCoverageLevel(LevelDB level, String csURI, String model){ String graphUri = stack.getGraph(model); if (graphUri == null) { return false; @@ -3432,27 +3461,54 @@ public boolean updateCoverageLevel(LevelDB level, ControlSetDB cs, String model) Model datasetModel = dataset.getNamedModel(graphUri); // Encode the population level as a single property of the asset resource - Resource resource = datasetModel.getResource(getLongName(cs.getUri())); + Resource resource = datasetModel.getResource(getLongName(csURI)); Property property = ResourceFactory.createProperty(getLongName("core#hasCoverageLevel")); RDFNode object = ResourceFactory.createResource(getLongName(level.getUri())); // Now remove the old value and save the new value - dataset.begin(ReadWrite.WRITE); - resource.removeAll(property); - resource.addProperty(property, object); - dataset.commit(); - dataset.end(); + try { + dataset.begin(ReadWrite.WRITE); + resource.removeAll(property); + resource.addProperty(property, object); + dataset.commit(); + } + catch (Exception e) { + // Abort the changes and signal that there has been an error + dataset.abort(); + String message = String.format("Error occurred while updating control coverage level for CS %s", csURI); + logger.error(message, e); + throw new RuntimeException(message, e); + } + finally { + dataset.end(); + } + + if(cacheEnabled){ + // Make the same change in the cached object, if it exists + ControlSetDB cs = this.getControlSet(csURI, model); + if(cs != null) { + cs.setCoverageLevel(level.getUri()); + this.store(cs, model); + } + + // Note that the calling process must change ControlSetDB objects for other graphs } + } return true; } - /* Method to override the proposed status of a CS in a graph without creating the CS in the + @Override + public boolean updateCoverageLevel(LevelDB level, ControlSetDB cs, String model){ + return updateCoverageLevel(level, cs.getUri(), model); + } + + /* Methods to override the proposed status of a CS in a graph without creating the CS in the * same graph. Needed to adjust user/client supplied status flags which appear as single * triples in the asserted graph, but without the CS entity (which is added later by the * validator in the inferred graph). 
*/ @Override - public boolean updateProposedStatus(Boolean status, ControlSetDB cs, String model){ + public boolean updateProposedStatus(Boolean status, String csURI, String model){ String graphUri = stack.getGraph(model); if (graphUri == null) { return false; @@ -3460,19 +3516,45 @@ public boolean updateProposedStatus(Boolean status, ControlSetDB cs, String mode Model datasetModel = dataset.getNamedModel(graphUri); // Encode the population level as a single property of the asset resource - Resource resource = datasetModel.getResource(getLongName(cs.getUri())); + Resource resource = datasetModel.getResource(getLongName(csURI)); Property property = ResourceFactory.createProperty(getLongName("core#isProposed")); // Now remove the old value and save the new value - dataset.begin(ReadWrite.WRITE); - resource.removeAll(property); - resource.addLiteral(property, status.booleanValue()); - dataset.commit(); - dataset.end(); + try { + dataset.begin(ReadWrite.WRITE); + resource.removeAll(property); + resource.addLiteral(property, status.booleanValue()); + dataset.commit(); + } + catch (Exception e) { + // Abort the changes and signal that there has been an error + dataset.abort(); + String message = String.format("Error occurred while updating control proposed status for CS %s", csURI); + logger.error(message, e); + throw new RuntimeException(message, e); + } + finally { + dataset.end(); + } + + if(cacheEnabled){ + // Make the same change in the cached object, if it exists + ControlSetDB cs = this.getControlSet(csURI, model); + if(cs != null) { + cs.setProposed(status); + this.store(cs, model); + } + + // Note that the calling process must change ControlSetDB objects for other graphs + } return true; } + @Override + public boolean updateProposedStatus(Boolean status, ControlSetDB cs, String model){ + return updateProposedStatus(status, cs.getUri(), model); + } /* Internal class passed to the Querier's GsonBuilder */ diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/SystemModelUpdater.java b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/SystemModelUpdater.java index 1bf92c97..23224bf5 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/SystemModelUpdater.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/SystemModelUpdater.java @@ -42,6 +42,7 @@ import uk.ac.soton.itinnovation.security.model.system.Relation; import uk.ac.soton.itinnovation.security.model.system.TrustworthinessAttributeSet; import uk.ac.soton.itinnovation.security.modelquerier.util.ModelStack; +import uk.ac.soton.itinnovation.security.modelquerier.util.QuerierUtils; import uk.ac.soton.itinnovation.security.semanticstore.AStoreWrapper; import uk.ac.soton.itinnovation.security.semanticstore.util.SparqlHelper; @@ -661,42 +662,6 @@ public Set updateControlSet(AStoreWrapper store, ControlSet cs) { return controlSets; } - private Set getExpandedControlSets(Set controlSets) { - Set expandedControlSets = new HashSet<>(); - - for (String cs : controlSets) { - Set expCs = getControlTriplet(cs); - expandedControlSets.addAll(expCs); - } - - return expandedControlSets; - } - - private Set getControlTriplet(String csuri) { - String[] uriFrags = csuri.split("#"); - String uriPrefix = uriFrags[0]; - String shortUri = uriFrags[1]; - - String [] shortUriFrags = shortUri.split("-"); - String control = shortUriFrags[0] + "-" + shortUriFrags[1]; - control = control.replace("_Min", "").replace("_Max", ""); - String assetId = shortUriFrags[2]; - - //logger.debug("control: {}", 
control);
- //logger.debug("assetId: {}", assetId);
-
- String csAvg = uriPrefix + "#" + control + "-" + assetId;
- String csMin = uriPrefix + "#" + control + "_Min" + "-" + assetId;
- String csMax = uriPrefix + "#" + control + "_Max" + "-" + assetId;
-
- //logger.debug("csAvg: {}", csAvg);
- //logger.debug("csMin: {}", csMin);
- //logger.debug("csMax: {}", csMax);
-
- Set controlSets = new HashSet<>(Arrays.asList(csAvg, csMin, csMax));
- return controlSets;
- }
-
 /**
 * Toggle the proposed status of multiple control sets
 *
@@ -713,7 +678,7 @@ public Set updateControlSets(AStoreWrapper store, Set controlSet
 throw new IllegalArgumentException("Controls cannot be work in progress but not proposed");
 }
- Set expandedControlSets = getExpandedControlSets(controlSets);
+ Set expandedControlSets = QuerierUtils.getExpandedControlSets(controlSets);
 for (String cs : expandedControlSets) {
 logger.debug("control set {}, proposed: {}", cs, proposed);
diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ControlStrategyDB.java b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ControlStrategyDB.java
index 09fb19ed..59be9563 100644
--- a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ControlStrategyDB.java
+++ b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ControlStrategyDB.java
@@ -86,15 +86,39 @@ public ControlStrategyDB() {
 private String hasMin; // Pointer from CSG for an average likelihood Threat to the CSG for the lowest likelihood Threat
 private String hasMax; // Pointer from CSG for an average likelihood Threat to the CSG for the highest likelihood Threat
+ /**
+ * Returns true if this CSG is relevant in current risk calculations
+ */
 public boolean isCurrentRisk() {
 // If the property doesn't exist, default to true
 return currentRisk != null ? currentRisk : true;
 }
+ public void setCurrentRisk(Boolean value){
+ if(value == null || value) {
+ // If the property doesn't exist, it is equivalent to true
+ this.currentRisk = null;
+ } else {
+ // So it only needs to be stored if false
+ this.currentRisk = value;
+ }
+ }
+ /**
+ * Returns true if this CSG is relevant in future risk calculations
+ */
 public boolean isFutureRisk() {
 // If the property doesn't exist, default to true
 return futureRisk != null ? futureRisk : true;
 }
+ public void setFutureRisk(Boolean value){
+ if(value == null || value) {
+ // If the property doesn't exist, it is equivalent to true
+ this.futureRisk = null;
+ } else {
+ // So it only needs to be stored if false
+ this.futureRisk = value;
+ }
+ }
 public boolean isEnabled() {
 // If the property doesn't exist, default to false
diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ThreatDB.java b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ThreatDB.java
index 994c82d3..8b5e0cbb 100644
--- a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ThreatDB.java
+++ b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/dto/ThreatDB.java
@@ -189,10 +189,13 @@ public boolean isCurrentRisk() {
 return currentRisk != null ? currentRisk : true;
 }
 public void setCurrentRisk(Boolean value){
- if(value == null || !value)
+ if(value == null || value) {
+ // If the property doesn't exist, it is equivalent to true
 this.currentRisk = null;
- else
+ } else {
+ // So it only needs to be stored if false
+ this.currentRisk = value;
+ }
 }
 /**
@@ -203,10 +206,13 @@ public boolean isFutureRisk() {
 return futureRisk ? futureRisk : true;
 }
 public void setFutureRisk(Boolean value){
- if(value == null || !value)
- this.futureRisk = null;
- else
- this.futureRisk = value;
+ if(value == null || value) {
+ // If the property doesn't exist, it is equivalent to true
+ this.futureRisk = null;
+ } else {
+ // So it only needs to be stored if false
+ this.futureRisk = value;
+ }
 }
 }
diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/util/QuerierUtils.java b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/util/QuerierUtils.java
new file mode 100644
index 00000000..9ab94a31
--- /dev/null
+++ b/src/main/java/uk/ac/soton/itinnovation/security/modelquerier/util/QuerierUtils.java
@@ -0,0 +1,93 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// © University of Southampton IT Innovation Centre, 2024
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+// Created By : Ken Meacham
+// Created Date : 18/01/2024
+// Created for Project : Cyberkit4SME
+//
+/////////////////////////////////////////////////////////////////////////
+package uk.ac.soton.itinnovation.security.modelquerier.util;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class QuerierUtils {
+
+ private QuerierUtils() {
+ throw new IllegalStateException("QuerierUtils is a Utility class");
+ }
+
+ /**
+ * Given a set of ControlSet URIs, return all related URIs (see getControlTriplet)
+ * @param controlSets set of ControlSet URIs
+ * @return expanded set of ControlSet URIs
+ */
+ public static Set getExpandedControlSets(Set controlSets) {
+ Set expandedControlSets = new HashSet<>();
+
+ for (String cs : controlSets) {
+ Set expCs = getControlTriplet(cs);
+ expandedControlSets.addAll(expCs);
+ }
+
+ return expandedControlSets;
+ }
+
+ /**
+ * Given a ControlSet URI, return the set of related URIs: csAvg, csMin, csMax
+ * @param csuri ControlSet URI (could be avg, min or max)
+ * @return set of related URIs: csAvg, csMin, csMax
+ */
+ public static Set getControlTriplet(String csuri) {
+ String[] uriFrags = csuri.split("#");
+ String uriPrefix = uriFrags[0];
+ String shortUri = uriFrags[1];
+
+ String [] shortUriFrags = shortUri.split("-");
+ String control = shortUriFrags[0] + "-" + shortUriFrags[1];
+ control = control.replace("_Min", "").replace("_Max", "");
+ String assetId = shortUriFrags[2];
+
+ String csAvg = uriPrefix + "#" + control + "-" + assetId;
+ String csMin = uriPrefix + "#" + control + "_Min" + "-" + assetId;
+ String csMax = uriPrefix + "#" + control + "_Max" + "-" + assetId;
+
+ return new HashSet<>(Arrays.asList(csAvg, csMin, csMax));
+ }
+
+ /**
+ * get domain Control URI given a CS URI
+ * @param csuri CS URI
+ * @return control URI
+ */
+ public static String
getDomainControlUri(String csuri) { + Pattern pattern = Pattern.compile("system#CS-(.*?)-[0-9a-f]+"); + Matcher matcher = pattern.matcher(csuri); + + if (matcher.find()) { + String extractedPart = matcher.group(1); // "CS-DisabledProcess" + return "domain#" + extractedPart; + } + return ""; + } + +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/ModelValidator.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/ModelValidator.java index 07ea2966..d070dd85 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/ModelValidator.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/ModelValidator.java @@ -111,6 +111,7 @@ public RiskCalcResultsDB calculateRiskLevels(RiskCalculationMode mode, boolean s final long startTime = System.currentTimeMillis(); IQuerierDB querier = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model); + //TODO: check when this should be run, as it may also be done elseqhere querier.initForRiskCalculation(); RiskCalculator rc = new RiskCalculator(querier); boolean success = rc.calculateRiskLevels(mode, saveResults, progress); diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/Validator.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/Validator.java index ef6f63af..2ea9eaf0 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/Validator.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/Validator.java @@ -1065,7 +1065,9 @@ public void createThreats() { } else { systemThreatAvg.setFrequency(domainThreat.getFrequency()); systemThreatAvg.setSecondaryThreat(domainThreat.isSecondaryThreat()); - systemThreatAvg.setNormalOperation(domainThreat.isNormalOperation()); + systemThreatAvg.setNormalOperation(domainThreat.isNormalOperation()); + systemThreatAvg.setCurrentRisk(domainThreat.isCurrentRisk()); + systemThreatAvg.setFutureRisk(domainThreat.isFutureRisk()); } // Create the minimum likelihood threat, if the domain model has one and the system pattern is a non-singleton @@ -1090,7 +1092,9 @@ public void createThreats() { systemThreatMin.setFrequency(domainThreat.getFrequency()); systemThreatMin.setSecondaryThreat(domainThreat.isSecondaryThreat()); systemThreatMin.setNormalOperation(domainThreat.isNormalOperation()); - } + systemThreatMin.setCurrentRisk(domainThreat.isCurrentRisk()); + systemThreatMin.setFutureRisk(domainThreat.isFutureRisk()); + } systemThreatMin.setMinOf(systemThreatAvg.getUri()); systemThreatAvg.setHasMin(systemThreatMin.getUri()); } @@ -1116,7 +1120,9 @@ public void createThreats() { } else { systemThreatMax.setFrequency(domainThreat.getFrequency()); systemThreatMax.setSecondaryThreat(domainThreat.isSecondaryThreat()); - systemThreatMax.setNormalOperation(domainThreat.isNormalOperation()); + systemThreatMax.setNormalOperation(domainThreat.isNormalOperation()); + systemThreatMax.setCurrentRisk(domainThreat.isCurrentRisk()); + systemThreatMax.setFutureRisk(domainThreat.isFutureRisk()); } systemThreatMax.setMaxOf(systemThreatAvg.getUri()); systemThreatAvg.setHasMax(systemThreatMax.getUri()); @@ -1353,16 +1359,22 @@ public void createControlStrategies() { controlStrategyAvg = new ControlStrategyDB(); controlStrategyAvg.setParent(dcsg.getUri()); controlStrategyAvg.setDescription(generateDescription(dcsg.getDescription(), matchingPattern)); + controlStrategyAvg.setFutureRisk(dcsg.isFutureRisk()); + controlStrategyAvg.setCurrentRisk(dcsg.isCurrentRisk()); if(threatMax != null) 
{ controlStrategyMax = new ControlStrategyDB(); controlStrategyMax.setParent(dcsg.getUri()); controlStrategyMax.setDescription(generateDescription(dcsg.getDescription(), matchingPattern)); + controlStrategyMax.setFutureRisk(dcsg.isFutureRisk()); + controlStrategyMax.setCurrentRisk(dcsg.isCurrentRisk()); } if(threatMin != null) { controlStrategyMin = new ControlStrategyDB(); controlStrategyMin.setParent(dcsg.getUri()); controlStrategyMin.setDescription(generateDescription(dcsg.getDescription(), matchingPattern)); - } + controlStrategyMin.setFutureRisk(dcsg.isFutureRisk()); + controlStrategyMin.setCurrentRisk(dcsg.isCurrentRisk()); + } // Assemble a complete list of domain CS to be found, with a deterministic ordering List allCS = new ArrayList<>(); diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackNode.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackNode.java index e0ca136f..aa7ca181 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackNode.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackNode.java @@ -41,6 +41,7 @@ import com.bpodgursky.jbool_expressions.Variable; public class AttackNode { + private static final Logger logger = LoggerFactory.getLogger(AttackNode.class); private AttackPathDataset apd; @@ -96,12 +97,26 @@ public class AttackNode { private static final String ATTACK_MITIGATION_CSG = "attack_mitigation_csg"; private class InnerResult { + Set loopbackNodeUris = new HashSet<>(); Set allCauseUris = new HashSet<>(); int minDistance = 0; int maxDistance = 0; Map data = new HashMap<>(); + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{{{"); + sb.append(loopbackNodeUris); + sb.append(", "); + sb.append(allCauseUris); + sb.append(", "); + sb.append("data:"); + sb.append(data); + sb.append("}}}"); + return sb.toString(); + } + public LogicalExpression getData(String key) { return this.data.get(key); } @@ -143,6 +158,13 @@ public int getMaxDistance() { } }; + /** + * Attack Node + * @param uri + * @param apd + * @param nodes + * @param id + */ public AttackNode(String uri, AttackPathDataset apd, AttackTree nodes, int id) { this.apd = apd; @@ -165,6 +187,10 @@ public AttackNode(String uri, AttackPathDataset apd, AttackTree nodes, int id) { this.uriSymbol = this.makeSymbol(uri); } + public LogicalExpression getAttackTreeMitigationCSG() { + return this.attackTreeMitigationCSG; + } + @Override public int hashCode() { return Objects.hash(this.uri); @@ -172,12 +198,19 @@ public int hashCode() { @Override public boolean equals(Object obj) { - AttackNode an = (AttackNode) obj; - if (this.uri.equals(an.getUri())) { + // Check for self-comparison + if (this == obj) { return true; - } else { + } + + // Use instanceof to check for null and ensure the correct type + if (!(obj instanceof AttackNode)) { return false; } + + AttackNode an = (AttackNode) obj; + + return java.util.Objects.equals(this.uri, an.getUri()); } public void setMaxDistanceFromTargetByTarget(String uri, int value) { @@ -205,7 +238,7 @@ public int getVisits() { } public String getVisitsStats() { - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); sb.append(" Visits: " + this.visits); sb.append(" noCauseV: " + this.noCauseVisits); sb.append(" causeV: " + this.causeVisits); @@ -214,7 +247,7 @@ public String getVisitsStats() { } public String toString(String pad) { - StringBuffer sb = new StringBuffer(); + 
StringBuilder sb = new StringBuilder(); sb.append(pad + " ID("); sb.append(this.id); sb.append(") --> "); @@ -226,7 +259,7 @@ public String toString(String pad) { } public String toString() { - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); sb.append("\nNode ("); sb.append(this.id); sb.append(") URI: "); @@ -259,9 +292,7 @@ public LogicalExpression getControlStrategies() { csgSymbols.add(this.makeSymbol(csgUri)); } - LogicalExpression leCSG = new LogicalExpression(this.apd, csgSymbols, false); - - return leCSG; + return new LogicalExpression(csgSymbols, false); } public LogicalExpression getControls() { @@ -276,7 +307,6 @@ public LogicalExpression getControls() { * * So we will end up with something like: OR(AND(c1, c1), AND(c3), AND(c1, c4)) */ - Set csgUris = this.apd.getThreatControlStrategyUris(this.uri, this.nodes.getIsFutureRisk()); List leCSGs = new ArrayList<>(); @@ -287,9 +317,9 @@ public LogicalExpression getControls() { for (String csUri : csUris) { csSymbols.add(this.makeSymbol(csUri)); } - leCSGs.add(new LogicalExpression(this.apd, csSymbols, true)); + leCSGs.add(new LogicalExpression(csSymbols, true)); } - return new LogicalExpression(this.apd, new ArrayList(leCSGs), false); + return new LogicalExpression(new ArrayList(leCSGs), false); } public int getId() { @@ -324,18 +354,18 @@ public List getAllDirectCauseUris() { } private Expression makeSymbol(String uri) { - // TODO need to find equivalent of symbol->algebra.definition - return Variable.of(this.uri); + return Variable.of(uri); } /** * Performs a backtrace from the current AttackNode to its ancestors * - * @param cPath the current path to the AttackNode being traced + * @param cPath the current path to the AttackNode being traced * @param computeLogic compute the logical result of the backtrace * @return an object containing the results of the backtrace - * @throws TreeTraversalException if an error occurs during traversal of the AttackNode tree - * @throws Exception if an unexpected error occurs + * @throws TreeTraversalException if an error occurs during traversal of the + * AttackNode tree + * @throws Exception if an unexpected error occurs */ public InnerResult backtrace(Set cPath, boolean computeLogic) throws TreeTraversalException, Exception { @@ -346,9 +376,10 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre } currentPath.add(this.uri); - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " BACKTRACE for: " + this.uri.substring(7) + " (nodeID:" + this.id + ") "+ - // " current path length: " + (currentPath.size()-1)); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " BACKTRACE for: " + this.uri.substring(7) + " (nodeID:" + this.id + ") " + + " current path length: " + (currentPath.size() - 1) + + " all direct cause uris: " + this.allDirectCauseUris.size()); this.visits += 1; @@ -368,8 +399,8 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre intersection.retainAll(currentPath); if (intersection.size() == result.getLoopbackNodeUris().size()) { this.cacheHitVisits += 1; - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " Cache hit, no cause"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " Cache hit, no cause"); throw new TreeTraversalException(result.getLoopbackNodeUris()); } } @@ -399,8 +430,8 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre continue; } else { // then in this case there is more to explore - // 
logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " Cache hit: node can be cause, but more to explore"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " Cache hit: node can be cause, but more to explore"); useCache = false; break; } @@ -408,8 +439,8 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre if (useCache) { this.cacheHitVisits += 1; - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " Cache hit, node can be caused, cache can be used"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " Cache hit, node can be caused, cache can be used"); return res; } } @@ -443,23 +474,20 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre boolean outerSuccess = true; // need this for try->except->ELSE* python equivalent try { - this.allDirectCauseUris = this.getAllDirectCauseUris(); - if (this.allDirectCauseUris.isEmpty()) { - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " No direct causes"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " No direct causes"); // This will be top of tree misbehaviours (normal-op, external // cause). Not root causes as they have parents in normal-ops. // TODO: can this just move to the end of the function? - tmpMinDistanceFromRoot = -1; tmpMaxDistanceFromRoot = -1; - List tmpObjList = new ArrayList(); + List tmpObjList = new ArrayList<>(); tmpObjList.add(this.makeSymbol(this.uri)); - tmpRootCause = new LogicalExpression(this.apd, tmpObjList, true); + tmpRootCause = new LogicalExpression(tmpObjList, true); if (this.isThreat()) { String err = "There should not be a threat with no parents: " + this.uri; @@ -469,7 +497,6 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre logger.error(err); throw new Exception(err); // TODO: put error in exception and choose a better Exception class } else { - attackMitigatedByCS = null; threatMitigatedByCS = null; attackMitigatedByCSG = null; @@ -482,25 +509,25 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre Set intersection = new HashSet<>(this.allDirectCauseUris); intersection.retainAll(currentPath); - if (intersection.size() > 0) { + if (!intersection.isEmpty()) { // For a threat we require all parents. // If even one is on the current path then the threat is triggered by its own consequence which is useless. 
List consequence = new ArrayList<>(); for (String item : intersection) { consequence.add(item.substring(7)); } - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " threat is dependent on its own consequence: " + consequence); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " threat is dependent on its own consequence: " + consequence); throw new TreeTraversalException(intersection); } List sortedCauses = new ArrayList<>(this.allDirectCauseUris); Collections.sort(sortedCauses); - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " " + sortedCauses.size() + " direct causes of threat"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " " + sortedCauses.size() + " direct causes of threat"); - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " └─>" + sortedCauses); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " └─>" + sortedCauses); for (String parentUri : sortedCauses) { AttackNode parent = this.nodes.getOrCreateNode(parentUri); @@ -522,12 +549,10 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre // We could collect all the p_results from the try // block and then iterate through them instead of // executing immediately. - validParentUris.add(parentUri); loopbackNodeUris.addAll(pResult.getLoopbackNodeUris()); allCauseUris.addAll(pResult.getAllCauseUris()); - // if (this.isNormalOp() == parent.isNormalOp()) { if (Objects.equals(this.isNormalOp(), parent.isNormalOp()) && !parent.isExternalCause()) { parentMinDistancesFromRoot.add(pResult.getMinDistance()); parentMaxDistancesFromRoot.add(pResult.getMaxDistance()); @@ -539,17 +564,14 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre parentThreatMitigationsCSG.add(pResult.getData(THREAT_MITIGATION_CSG)); // Entire path parentThreatTrees.add(pResult.getData(THREAT_TREE)); if (!parent.isNormalOp() && !parent.isExternalCause()) { - parentAttackMitigationsCS.add(pResult.getData(THREAT_MITIGATION_CS)); - parentAttackMitigationsCSG.add(pResult.getData(THREAT_MITIGATION_CSG)); + parentAttackMitigationsCS.add(pResult.getData(ATTACK_MITIGATION_CS)); + parentAttackMitigationsCSG.add(pResult.getData(ATTACK_MITIGATION_CSG)); parentAttackTrees.add(pResult.getData(ATTACK_TREE)); } } } } - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " Finished looking at threat causes (nodeID:" + this.id + ")"); - if (parentRootCauses.isEmpty()) { // then this is a root cause threat parentMinDistancesFromRoot = new ArrayList<>(); @@ -558,20 +580,23 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre parentMaxDistancesFromRoot = new ArrayList<>(); parentMaxDistancesFromRoot.add(-1); - List tmpObjList = new ArrayList(); + List tmpObjList = new ArrayList<>(); tmpObjList.add(this.makeSymbol(this.uri)); - parentRootCauses.add(new LogicalExpression(this.apd, tmpObjList, true)); + parentRootCauses.add(new LogicalExpression(tmpObjList, true)); } // The root cause of a threat is all (AND) of the rout // causes of its parents - tmpRootCause = new LogicalExpression(this.apd, parentRootCauses, true); + tmpRootCause = new LogicalExpression(parentRootCauses, true); // The distance from a root cause therefore is the maximum // of the parent distances +1 tmpMinDistanceFromRoot = Collections.max(parentMinDistancesFromRoot) + 1; tmpMaxDistanceFromRoot = Collections.max(parentMaxDistancesFromRoot) + 1; + logger.debug(String.format("%1$" + 
currentPath.size() + "s", "") + + " Finished looking at threat causes (nodeID:" + this.id + ")"); + if (computeLogic == true) { // The attack/threat tree is // AND( @@ -580,15 +605,15 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre // ) if (!this.isNormalOp()) { // if this threat (self) is on the attack path then - // it can inself bea mitigation on the attack_path + // it can inself be a mitigation on the attack_path parentAttackTrees.add(this.uriSymbol); } - bsAttackTree = new LogicalExpression(this.apd, parentAttackTrees, true); + bsAttackTree = new LogicalExpression(parentAttackTrees, true); // All threats are on the threat path parentThreatTrees.add(this.uriSymbol); - threatTree = new LogicalExpression(this.apd, parentThreatTrees, true); + threatTree = new LogicalExpression(parentThreatTrees, true); /* * A threat can be mitigated by OR( inactive control strategies located at itself mitigations of any of its parents ) @@ -605,13 +630,13 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre parentThreatMitigationsCS.add(this.controls); parentThreatMitigationsCSG.add(this.controlStrategies); - attackMitigatedByCS = new LogicalExpression(this.apd, + attackMitigatedByCS = new LogicalExpression( new ArrayList(parentAttackMitigationsCS), false); - threatMitigatedByCS = new LogicalExpression(this.apd, + threatMitigatedByCS = new LogicalExpression( new ArrayList(parentThreatMitigationsCS), false); - attackMitigatedByCSG = new LogicalExpression(this.apd, + attackMitigatedByCSG = new LogicalExpression( new ArrayList(parentAttackMitigationsCSG), false); - threatMitigatedByCSG = new LogicalExpression(this.apd, + threatMitigatedByCSG = new LogicalExpression( new ArrayList(parentThreatMitigationsCSG), false); } } else { @@ -623,11 +648,11 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre sortedCauses.removeAll(currentPath); Collections.sort(sortedCauses); - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " " + sortedCauses.size() + " direct causes of MS"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " " + sortedCauses.size() + " direct causes of MS"); - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " └─>" + sortedCauses); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " └─>" + sortedCauses); for (String parentUri : sortedCauses) { AttackNode parent = this.nodes.getOrCreateNode(parentUri); @@ -655,8 +680,8 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre parentThreatMitigationsCSG.add(pResult.getData(THREAT_MITIGATION_CSG)); // Entire path parentThreatTrees.add(pResult.getData(THREAT_TREE)); if (!parent.isNormalOp()) { - parentAttackMitigationsCS.add(pResult.getData(THREAT_MITIGATION_CS)); - parentAttackMitigationsCSG.add(pResult.getData(THREAT_MITIGATION_CSG)); + parentAttackMitigationsCS.add(pResult.getData(ATTACK_MITIGATION_CS)); + parentAttackMitigationsCSG.add(pResult.getData(ATTACK_MITIGATION_CSG)); parentAttackTrees.add(pResult.getData(ATTACK_TREE)); } } @@ -666,39 +691,38 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre if (validParentUris.isEmpty()) { // Then all parents have thrown exceptions or were on the // current path - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " misbehaviour with all parents invalid: " + this.uri + " (nodeID:" + this.id + ")"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " 
misbehaviour with all parents invalid: " + this.uri + " (nodeID:" + this.id + ")"); throw new TreeTraversalException(loopbackNodeUris); } // The rootCause of a misbehaviour is any (OR) of the root // cause of its parents - rootCause = new LogicalExpression(this.apd, parentRootCauses, false); + rootCause = new LogicalExpression(parentRootCauses, false); // The distance from a root cause is therefore the minimum of // the parent distances tmpMinDistanceFromRoot = Collections.min(parentMinDistancesFromRoot) + 1; tmpMaxDistanceFromRoot = Collections.min(parentMaxDistancesFromRoot) + 1; - // logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + - // " Finished looking at MS causes (nodeID:" + this.id + ") distance: " + - // tmpMinDistanceFromRoot + " " + tmpMaxDistanceFromRoot); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " Finished looking at MS causes (nodeID:" + this.id + ") distance: " + + tmpMinDistanceFromRoot + " " + tmpMaxDistanceFromRoot); if (computeLogic) { - bsAttackTree = new LogicalExpression(this.apd, parentAttackTrees, false); - bsThreatTree = new LogicalExpression(this.apd, parentThreatTrees, false); - + bsAttackTree = new LogicalExpression(parentAttackTrees, false); + bsThreatTree = new LogicalExpression(parentThreatTrees, false); // Misbehaviours can be miticated by // AND( // mitigations of their parents // ) - attackMitigatedByCS = new LogicalExpression(this.apd, + attackMitigatedByCS = new LogicalExpression( new ArrayList(parentAttackMitigationsCS), true); - threatMitigatedByCS = new LogicalExpression(this.apd, + threatMitigatedByCS = new LogicalExpression( new ArrayList(parentThreatMitigationsCS), true); - attackMitigatedByCSG = new LogicalExpression(this.apd, + attackMitigatedByCSG = new LogicalExpression( new ArrayList(parentAttackMitigationsCSG), true); - threatMitigatedByCSG = new LogicalExpression(this.apd, + threatMitigatedByCSG = new LogicalExpression( new ArrayList(parentThreatMitigationsCSG), true); } } @@ -706,24 +730,23 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre } catch (TreeTraversalException error) { outerSuccess = false; - // logger.error(String.format("%1$"+ currentPath.size() +"s", "") + - // " Error " + this.uri + " (nodeID:" + this.id + ")"); - + //logger.debug(String.format("%1$"+ currentPath.size() +"s", "") + + // " Error " + this.uri + " (nodeID:" + this.id + ")"); loopbackNodeUris = error.getLoopbackNodeUris(); - Set loopbackNodeUrisOnPath = new HashSet(currentPath); + Set loopbackNodeUrisOnPath = new HashSet<>(currentPath); loopbackNodeUrisOnPath.retainAll(loopbackNodeUris); loopbackNodeUrisOnPath.remove(this.uri); InnerResult result = new InnerResult(); if (loopbackNodeUrisOnPath.isEmpty()) { this.cannotBeCaused = true; - // logger.error(String.format("%1$"+ currentPath.size() +"s", "") + - // " Error " + this.uri + " can never be caused (nodeID:" + this.id + ")"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " Error " + this.uri + " can never be caused (nodeID:" + this.id + ")"); } else { result.setLoopbackNodeUris(loopbackNodeUrisOnPath); - // logger.error(String.format("%1$"+ currentPath.size() +"s", "") + - // " Error " + this.uri + " caused by node on path: (nodeID:" + this.id + ")"); + logger.debug(String.format("%1$" + currentPath.size() + "s", "") + + " Error " + this.uri + " caused by node on path: (nodeID:" + this.id + ")"); } this.noCauseResults.add(result); @@ -750,9 +773,8 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) 
throws Tre * this node, but before that we need to merge the results with any others that have previously been found from other paths to this node. Interestingly, when * combining cause over different paths, the logic is reversed. */ - List tmpObjList = new ArrayList<>(Arrays.asList(this.rootCause, tmpRootCause)); - this.rootCause = new LogicalExpression(this.apd, tmpObjList, false); + this.rootCause = new LogicalExpression(tmpObjList, false); // Save the max and min distance from this root_cause // The max is useful to spread things out for display @@ -765,29 +787,28 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre // although tempting to calculate the distance from target here, we // can't because we don't know if the current tree is going to be // successful all the way back to the target. - if (computeLogic) { List aCsList = new ArrayList<>( Arrays.asList(this.attackTreeMitigationCS, attackMitigatedByCS)); - this.attackTreeMitigationCS = new LogicalExpression(this.apd, new ArrayList(aCsList), true); + this.attackTreeMitigationCS = new LogicalExpression(new ArrayList(aCsList), true); List tCsList = new ArrayList<>( Arrays.asList(this.threatTreeMitigationCS, threatMitigatedByCS)); - this.threatTreeMitigationCS = new LogicalExpression(this.apd, new ArrayList(tCsList), true); + this.threatTreeMitigationCS = new LogicalExpression(new ArrayList(tCsList), true); List aCsgList = new ArrayList<>( Arrays.asList(this.attackTreeMitigationCSG, attackMitigatedByCSG)); - this.attackTreeMitigationCSG = new LogicalExpression(this.apd, new ArrayList(aCsgList), true); + this.attackTreeMitigationCSG = new LogicalExpression(new ArrayList(aCsgList), true); List tCsgList = new ArrayList<>( Arrays.asList(this.threatTreeMitigationCSG, threatMitigatedByCSG)); - this.threatTreeMitigationCSG = new LogicalExpression(this.apd, new ArrayList(tCsgList), true); + this.threatTreeMitigationCSG = new LogicalExpression(new ArrayList(tCsgList), true); List atList = new ArrayList<>(Arrays.asList(this.attackTree, bsAttackTree)); - this.attackTree = new LogicalExpression(this.apd, new ArrayList(atList), false); + this.attackTree = new LogicalExpression(new ArrayList(atList), false); List ttList = new ArrayList<>(Arrays.asList(this.threatTree, bsThreatTree)); - this.threatTree = new LogicalExpression(this.apd, new ArrayList(ttList), false); + this.threatTree = new LogicalExpression(new ArrayList(ttList), false); } InnerResult iResult = new InnerResult(); @@ -804,7 +825,7 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre iResult.putData(THREAT_MITIGATION_CS, threatMitigatedByCS); iResult.putData(THREAT_MITIGATION_CSG, threatMitigatedByCSG); iResult.putData(ATTACK_TREE, bsAttackTree); - iResult.putData(ATTACK_TREE, bsThreatTree); + iResult.putData(THREAT_TREE, bsThreatTree); } this.causeResults.add(iResult); @@ -817,10 +838,18 @@ public InnerResult backtrace(Set cPath, boolean computeLogic) throws Tre return new InnerResult(); } + /** + * Get direct cause URIs + * @return + */ public Set getDirectCauseUris() { return this.directCauseUris; } + /** + * Add direct cause URIs to directCauseUris + * @param uris + */ public void addDirectCauseUris(Set uris) { this.directCauseUris.addAll(uris); for (String causeUri : uris) { diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathAlgorithm.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathAlgorithm.java index c274f5cc..6bbc6f17 100644 --- 
a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathAlgorithm.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathAlgorithm.java @@ -53,9 +53,7 @@ public AttackPathAlgorithm(IQuerierDB querier) { final long startTime = System.currentTimeMillis(); logger.debug("STARTING Shortest Path Attack algortithm ..."); - - // TODO might have to delay initialisation of the dataset until risk - // mode is checked. + apd = new AttackPathDataset(querier); final long endTime = System.currentTimeMillis(); @@ -90,38 +88,19 @@ public void checkRequestedRiskCalculationMode(String requestedRiskMode) { } public boolean checkTargetUris(List targetUris) { - boolean retVal = true; logger.debug("Checking submitted list of target URIs: {}", targetUris); - if (!apd.checkMisbehaviourList(targetUris)) { - logger.error("shortest path, target MS URI not valid"); - retVal = false; - } - return retVal; - } - - public AttackTree calculateAttack(List targetUris, boolean allPaths, boolean normalOperations) - throws RuntimeException { - - logger.debug("calculate attack tree with allPaths: {}, normalOperations: {}", allPaths, normalOperations); - logger.debug("target URIs: {}", targetUris); - - AttackTree attackTree; - - try { - final long startTime = System.currentTimeMillis(); - // calculate attack tree, allPath dictates one or two backtrace - // AttackTree is initialised with FUTURE risk mode enabled - attackTree = new AttackTree(targetUris, true, !allPaths, apd); - - final long endTime = System.currentTimeMillis(); - logger.info("AttackPathAlgorithm.calculateAttackTree: execution time {} ms", endTime - startTime); - - } catch (Exception e) { - throw new RuntimeException(e); + // Check if the list is null or empty + if (targetUris == null || targetUris.isEmpty()) { + logger.warn("The list of target URIs is null or empty."); + return false; } - return attackTree; + if (!apd.checkMisbehaviourList(targetUris)) { + logger.error("shortest path, target MS URI not valid"); + return false; + } + return true; } public TreeJsonDoc calculateAttackTreeDoc(List targetUris, String riskCalculationMode, boolean allPaths, diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathDataset.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathDataset.java index 3622b4f0..5fa6592a 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathDataset.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackPathDataset.java @@ -32,24 +32,36 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import uk.ac.soton.itinnovation.security.model.Level; import uk.ac.soton.itinnovation.security.model.system.RiskCalculationMode; +import uk.ac.soton.itinnovation.security.model.system.RiskVector; import uk.ac.soton.itinnovation.security.modelquerier.IQuerierDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.AssetDB; +import uk.ac.soton.itinnovation.security.modelquerier.dto.ControlDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.ControlSetDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.ControlStrategyDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.LevelDB; +import 
uk.ac.soton.itinnovation.security.modelquerier.dto.MisbehaviourDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.MisbehaviourSetDB; +import uk.ac.soton.itinnovation.security.modelquerier.dto.ModelDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.ThreatDB; import uk.ac.soton.itinnovation.security.modelquerier.dto.TrustworthinessAttributeSetDB; +import uk.ac.soton.itinnovation.security.modelquerier.util.QuerierUtils; +import uk.ac.soton.itinnovation.security.modelvalidator.Progress; +import uk.ac.soton.itinnovation.security.modelvalidator.RiskCalculator; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.AssetDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.ConsequenceDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.ControlDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.StateDTO; + public class AttackPathDataset { + private static final Logger logger = LoggerFactory.getLogger(AttackPathDataset.class); protected IQuerierDB querier; @@ -85,20 +97,30 @@ public AttackPathDataset(IQuerierDB querier) { // Save the querier reference for use in other methods this.querier = querier; - // Load domain model poulation, impact, trustworthiness, risk and likelihood scales as maps keyed on their URI + // Load domain model poulation, impact, trustworthiness, risk and likelihood scales as maps keyed on their short URI (e.g. "domain#RiskLevelMedium") poLevels = querier.getPopulationLevels(); imLevels = querier.getImpactLevels(); liLevels = querier.getLikelihoodLevels(); twLevels = querier.getTrustworthinessLevels(); riLevels = querier.getRiskLevels(); - // Load domain model impact, trustworthiness, risk, and likelihood scales as lists sorted by their level value + // Make a sorted list of the LevelDB objects by their risk level values riskLevels.addAll(riLevels.values()); riskLevels.sort(Comparator.comparingInt(LevelDB::getLevelValue)); // Load system model assets, matching patterns and nodes assets = querier.getAssets("system", "system-inf"); + updateDatasets(); + + final long endTime = System.currentTimeMillis(); + logger.info("AttackPathDataset.AttackPathDataset(IQuerierDB querier): execution time {} ms", + endTime - startTime); + + } + + private void updateDatasets() { + // Load system model trustworthiness attribute sets trustworthinessAttributeSets = querier.getTrustworthinessAttributeSets("system-inf"); @@ -114,12 +136,17 @@ public AttackPathDataset(IQuerierDB querier) { // Load system model control strategies and determine whether they are enabled controlStrategies = querier.getControlStrategies("system-inf"); - final long endTime = System.currentTimeMillis(); - logger.info("AttackPathDataset.AttackPathDataset(IQuerierDB querier): execution time {} ms", - endTime - startTime); + // Create likelihood maps + for (ThreatDB threat : threats.values()) { + likelihoods.put(threat.getUri(), threat.getPrior()); + } + for (MisbehaviourSetDB miss : misbehaviourSets.values()) { + likelihoods.put(miss.getUri(), miss.getPrior()); + } } + /* * Create maps required by the risk calculation to find TWAS, MS and their relationship to roles */ @@ -135,80 +162,30 @@ protected void createMaps() { likelihoods.put(miss.getUri(), miss.getPrior()); } + final long endTime = System.currentTimeMillis(); + logger.debug("*********CREATE MAPS*********"); logger.debug("AttackPathDataset threats: {}", threats.size()); logger.debug("AttackPathDataset MS: {}", 
misbehaviourSets.size()); logger.debug("AttackPathDataset likelihoods: {}", likelihoods.size()); logger.debug("*****************************"); - - final long endTime = System.currentTimeMillis(); logger.info("AttackPathDataset.CreateMaps(): execution time {} ms", endTime - startTime); } public boolean isFutureRisk(String input) { - RiskCalculationMode requestedMode; try { - requestedMode = RiskCalculationMode.valueOf(input); + RiskCalculationMode requestedMode = RiskCalculationMode.valueOf(input); return requestedMode == RiskCalculationMode.FUTURE; } catch (IllegalArgumentException e) { // TODO: throw an exception - logger.error("Found unexpected riskCalculationMode parameter value {}.", input); + logger.warn("Found unexpected riskCalculationMode parameter value {}.", input); return false; } } - public boolean calculateAttackPath() throws RuntimeException { - try { - createMaps(); - return true; - } catch (Exception e) { - logger.error("calculating attack path dataset failed", e); - throw new RuntimeException(e); - } - } - - private void printAttackPathDataset() { - logger.debug("*******************************************************"); - logger.debug("*******************************************************"); - logger.debug("Threat CSGs:"); - for (ThreatDB threat : threats.values()) { - int csgsSize = threat.getBlockedByCSG().size() + threat.getMitigatedByCSG().size(); - if (csgsSize > 0) { - logger.debug(" {}, blocked: {} mitigated: {}", threat.getUri(), threat.getBlockedByCSG().size(), - threat.getMitigatedByCSG().size()); - } - Collection csgsBlocked = threat.getBlockedByCSG(); - if (csgsBlocked.size() > 0) { - for (String csg : csgsBlocked) { - List css = this.controlStrategies.get(csg).getMandatoryCS(); - logger.debug(" CSG blocked: {}, cs: {}", csg, css.size()); - for (String cs : css) { - logger.debug(" cs: {}", cs); - } - } - } - } - - logger.debug("Control Strategies"); - for (ControlStrategyDB csg : controlStrategies.values()) { - logger.debug("CSG: {} cs: {}", csg.getUri(), csg.getMandatoryCS().size()); - for (String cs : csg.getMandatoryCS()) { - logger.debug(" cs: {}", cs); - } - } - - logger.debug("ContolSets:"); - for (ControlSetDB cs : controlSets.values()) { - logger.debug("ControlSet: {}, proposed {}", cs.getUri(), cs.isProposed()); - } - logger.debug("Misbehaviours"); - for (MisbehaviourSetDB ms : misbehaviourSets.values()) { - AssetDB asset = assets.get(ms.getLocatedAt()); - logger.debug(" MS {}, likelihood: {}, risk: {}, asset: {}", ms.getUri(), ms.getPrior(), ms.getRisk(), - asset.getLabel()); - } - logger.debug("*******************************************************"); - logger.debug("*******************************************************"); + public String getCSGDescription(String uri) { + ControlStrategyDB csg = controlStrategies.get(uri); + return csg.getDescription(); } public Map getLikelihoods() { @@ -225,13 +202,12 @@ public Set getNormalOps() { * @param uri * @return */ - // TODO MS will provide a direct call to get uris public List getMisbehaviourDirectCauseUris(String misbUri) throws RuntimeException { try { MisbehaviourSetDB ms = misbehaviourSets.get(misbUri); return new ArrayList<>(ms.getCausedBy()); } catch (Exception e) { - return new ArrayList(); + return new ArrayList<>(); } } @@ -246,65 +222,43 @@ public List getThreatDirectCauseUris(String threatUri) throws RuntimeExc ThreatDB threat = threats.get(threatUri); return new ArrayList<>(threat.getCausedBy()); } catch (Exception e) { - return new ArrayList(); + return new ArrayList<>(); } } /** - * 
check if CSG ends in -Runtime or -Implementation + * Check if a Control Strategy Group (CSG) is activated. * - * @param csgUri - * @return - */ - public boolean isCurrentRiskCSG(String csgUri) { - return this.checkImplementationRuntime(csgUri); - } - - private boolean checkImplementationRuntime(String csgUri) { - Pattern pattern = Pattern.compile("\\b-Implementation-Runtime\\b|\\b-Implementation\\b"); - Matcher matcher = pattern.matcher(csgUri); - if (matcher.find()) { - return true; - } else { - return false; - } - } - - /** - * check if CSG ends in -Implementation-Runtime or -Implementation + * This method evaluates whether all mandatory Control Sets (CS) associated + * with the given CSG are proposed. * - * @param csgUri - * @return + * @param csg The Control Strategy Group to be checked. + * @return {@code true} if all mandatory Control Sets are proposed, + * otherwise {@code false}. */ - public boolean isFutureRiskCSG(String csgUri) { - // TODO: REGEX is now changed!!! - return !(csgUri.endsWith("-Implementation-Runtime") || csgUri.endsWith("-Implementation")); + public boolean isCSGActivated(ControlStrategyDB csg) { + return csg.getMandatoryCS().stream().allMatch(cs -> controlSets.get(cs).isProposed()); } /** - * check if CSG has a contingency plan + * Check if control strategy plan exists and is activated need to have a + * different way checking for contingency plans * - * @param csgUri - * @return + * @param csg the control stragegy + * @return {@code true} if contingency plan exists and is activated, + * otherwise {@code false} */ - public boolean checkContingencyPlan(String csgUri) throws RuntimeException { + public boolean hasContingencyPlan(String csgUri) throws RuntimeException { try { String contingencyPlan; - if (this.checkImplementationRuntime(csgUri)) { + if (csgUri.contains("-Implementation")) { contingencyPlan = csgUri.replaceAll("-Implementation-Runtime|-Implementation", ""); } else { - return false; + return true; } if (controlStrategies.containsKey(contingencyPlan)) { - boolean activated = true; - for (String cs : controlStrategies.get(contingencyPlan).getMandatoryCS()) { - if (!controlSets.get(cs).isProposed()) { - activated = false; - break; - } - } - return activated; + return isCSGActivated(controlStrategies.get(contingencyPlan)); } return true; } catch (Exception e) { @@ -313,34 +267,54 @@ public boolean checkContingencyPlan(String csgUri) throws RuntimeException { } /** - * get threat CSGs + * return false when this CSG + * - has no effect in future risk calculations + * - has no effect in current risk calculations + * - cannot be changed at runtime + * @param csg + * @param future + * @return + */ + boolean considerCSG(ControlStrategyDB csg, boolean future) { + if (future) { + return csg.isFutureRisk(); + } else { + return csg.isCurrentRisk() && isRuntimeMalleable(csg); + } + } + + /** + * Check if CS is runtime malleable assume all -Implementation, + * -Implementation-Runtime CSGs have contingency plans activated. 
* - * @param threatUri - * @return + * @param csg + * @return boolean */ - public Set getThreatControlStrategyUris(String threatUri, boolean future) throws RuntimeException { - // Return list of control strategies (urirefs) that block a threat - // (uriref) + Boolean isRuntimeMalleable(ControlStrategyDB csg) { + if (csg.getUri().contains("-Implementation")) { + return true; + //return hasContingencyPlan(csg.getUri()); + } else if (csg.getUri().contains("-Runtime")) { + return true; + } + return false; + } - /* - * "blocks": means a CSG appropriate for current or future risk calc "mitigates": means a CSG appropriate for furture risk (often a contingency plan for a - * current risk CSG); excluded from likelihood calc in current risk - */ + public Set getThreatControlStrategyUris(String threatUri, boolean future) throws RuntimeException { + // Return list of control strategies (urirefs) that block a threat (uriref) - Set csgURIs = new HashSet(); Set csgToConsider = new HashSet<>(); ThreatDB threat = this.threats.get(threatUri); try { - csgURIs.addAll(threat.getBlockedByCSG()); - if (future) { - csgURIs.addAll(threat.getMitigatedByCSG()); - } - for (String csgURI : csgURIs) { + for (String csgURI : threat.getBlockedByCSG()) { ControlStrategyDB csg = querier.getControlStrategy(csgURI, "system-inf"); - if (csg.isCurrentRisk()) { + if (considerCSG(csg, future)) { csgToConsider.add(csgURI); + } else { + logger.debug("CSG {} is NOT considered", csgURI); } } + } catch (Exception e) { throw new RuntimeException(e); } @@ -373,9 +347,9 @@ public List getCsgControlSetsUris(String csgUri) throws RuntimeException */ public List getCsgControlSets(String csgUri) throws RuntimeException { try { - List csList = new ArrayList(); + List csList = new ArrayList<>(); for (String csUri : controlStrategies.get(csgUri).getMandatoryCS()) { - csList.add(controlSets.get(csgUri)); + csList.add(controlSets.get(csUri)); } return csList; } catch (Exception e) { @@ -389,24 +363,25 @@ public List getCsgControlSets(String csgUri) throws RuntimeExcepti * @param csgUri * @return */ - public List getCsgInactiveControlSets(String csgUri) throws RuntimeException { - + public List getCsgInactiveControlSets(String csgUri) { try { - List csList = new ArrayList<>(); - for (String csUri : this.controlStrategies.get(csgUri).getMandatoryCS()) { - // TODO needs revisiting, CS object should be accessed directly - for (ControlSetDB cs : controlSets.values()) { - if (cs.getUri().equals(csUri) && (!cs.isProposed())) { - csList.add(csUri); - } - } - } - return csList; + return controlSets.values().stream() + .filter(cs -> !cs.isProposed() && (isMandatoryCS(csgUri, cs) || isOptionalCS(csgUri, cs))) + .map(ControlSetDB::getUri) + .collect(Collectors.toList()); } catch (Exception e) { throw new RuntimeException(e); } } + private boolean isMandatoryCS(String csgUri, ControlSetDB cs) { + return controlStrategies.get(csgUri).getMandatoryCS().contains(cs.getUri()); + } + + private boolean isOptionalCS(String csgUri, ControlSetDB cs) { + return controlStrategies.get(csgUri).getOptionalCS().contains(cs.getUri()); + } + /** * get threat inactive CSGs * @@ -415,7 +390,7 @@ public List getCsgInactiveControlSets(String csgUri) throws RuntimeExcep */ public List getThreatInactiveCSGs(String threatUri, boolean future) throws RuntimeException { try { - List csgUriList = new ArrayList(); + List csgUriList = new ArrayList<>(); for (String csgUri : getThreatControlStrategyUris(threatUri, future)) { if (!getCsgInactiveControlSets(csgUri).isEmpty()) { 
csgUriList.add(csgUri); @@ -427,60 +402,38 @@ public List getThreatInactiveCSGs(String threatUri, boolean future) thro } } - // TODO filtering LevelValue should be a parameter - public List filterMisbehaviours() throws RuntimeException { - /* - * compare MS by risk then likelihood, and return MS with likelihood or risk >= MEDIUM - */ - - List msUris = new ArrayList<>(); - try { - logger.debug("filtering misbehaviour sets..."); - - List msSorted = new ArrayList<>(misbehaviourSets.values()); - - Comparator comparator = Comparator.comparing(MisbehaviourSetDB::getRisk) - .thenComparing(MisbehaviourSetDB::getPrior); + /** + * Return MS with risk level > acceptableRiskLevel + */ + public List filterMisbehavioursByRiskLevel(String acceptableRiskLevel) { - msSorted.sort(comparator); + List msUris = new ArrayList<>(); - List msFiltered = msSorted.stream() - .filter(ms -> riLevels.get(ms.getRisk()).getLevelValue() >= 3).collect(Collectors.toList()); + logger.debug("filtering misbehaviour sets..."); - for (MisbehaviourSetDB ms : msFiltered) { - AssetDB asset = assets.get(ms.getLocatedAt()); - logger.debug("filtered MS: {} \t-> risk {} prior {} at {}", ms.getUri().substring(7), - ms.getRisk().substring(7), ms.getPrior().substring(7), asset.getLabel()); + int acceptableThreshold = riLevels.get(acceptableRiskLevel).getLevelValue(); + for (MisbehaviourSetDB ms : misbehaviourSets.values()) { + if (riLevels.get(ms.getRisk()).getLevelValue() > acceptableThreshold) { msUris.add(ms.getUri()); } - - logger.debug("filtered MS sets size: {}/{}", msUris.size(), misbehaviourSets.size()); - - } catch (Exception e) { - logger.error("got an error filtering misbehaviours: {}", e.getMessage()); - throw new RuntimeException("got an error filtering misbehavours", e); } + logger.debug("filtered MS sets size: {}/{}", msUris.size(), misbehaviourSets.size()); + return msUris; } + public boolean isExternalCause(String uri) { - boolean retVal = false; - // TODO: no need to check MS for external causes any more? 
if (misbehaviourSets.containsKey(uri)) { MisbehaviourSetDB ms = querier.getMisbehaviourSet(uri, "system-inf"); - if (ms != null) { - retVal = ms.isExternalCause(); - } + return (ms != null) && ms.isExternalCause(); } else if (trustworthinessAttributeSets.containsKey(uri)) { TrustworthinessAttributeSetDB twa = trustworthinessAttributeSets.get(uri); - if (twa != null) { - retVal = twa.isExternalCause(); - } + return (twa != null) && twa.isExternalCause(); } - - return retVal; + return false; } /** @@ -490,67 +443,41 @@ public boolean isExternalCause(String uri) { * @rerutn boolean */ public boolean isNormalOp(String uri) { - boolean retVal = false; // check if we have to deal with a threat URI if (this.threats.containsKey(uri)) { ThreatDB threat = this.querier.getThreat(uri, "system-inf"); - if (threat != null) { - retVal = threat.isNormalOperation(); - } + return (threat != null) && threat.isNormalOperation(); } else if (misbehaviourSets.containsKey(uri)) { MisbehaviourSetDB ms = querier.getMisbehaviourSet(uri, "system-inf"); - if (ms != null) { - retVal = ms.isNormalOpEffect(); - } + return (ms != null) && ms.isNormalOpEffect(); } else if (trustworthinessAttributeSets.containsKey(uri)) { - retVal = false; + return false; } else { logger.warn("Not sure what is this: {}", uri); + return false; } - - return retVal; } // describes if the URI refers to an initial cause misbehaviour public boolean isInitialCause(String uri) { - if (this.threats.keySet().contains(uri)) { - return threats.get(uri).isInitialCause(); - } else { - return false; - } + return threats.containsKey(uri) && threats.get(uri).isInitialCause(); } public boolean isThreatSimple(String uri) { - if (this.threats.keySet().contains(uri)) { - return true; - } else { - return false; - } + return this.threats.keySet().contains(uri); } public boolean isMisbehaviourSet(String uri) { - if (this.misbehaviourSets.keySet().contains(uri)) { - return true; - } else { - return false; - } + return this.misbehaviourSets.keySet().contains(uri); } public boolean isTrustworthinessAttributeSets(String uri) { - if (this.trustworthinessAttributeSets.keySet().contains(uri)) { - return true; - } else { - return false; - } + return this.trustworthinessAttributeSets.keySet().contains(uri); } public boolean isThreat(String uri) { - if (this.threats.keySet().contains(uri)) { - return true; - } else { - return false; - } + return this.threats.keySet().contains(uri); } public ThreatDB getThreat(String uri) { @@ -564,10 +491,7 @@ public void printThreatUris() { } public boolean isSecondaryThreat(String uri) { - if (threats.keySet().contains(uri) && (threats.get(uri).getSecondaryEffectConditions().size() > 0)) { - return true; - } - return false; + return threats.containsKey(uri) && threats.get(uri).getSecondaryEffectConditions().size() > 0; } public boolean isRootCause(String uri) { @@ -587,29 +511,216 @@ public String getLikelihood(String uri) { return ""; } - // check MS list exists, no point going futher - public boolean checkMisbehaviourList(List misbehaviours) { - boolean retVal = true; + /** + * Check risk calculation mode is the same as the requested one + * @param input + * @return + */ + public boolean checkRiskCalculationMode(String input) { + ModelDB model = querier.getModelInfo("system"); + logger.info("Model info: {}", model); + + RiskCalculationMode modelRiskCalculationMode; + RiskCalculationMode requestedMode; + + try { + logger.info("riskCalculationMode: {}", model.getRiskCalculationMode()); + modelRiskCalculationMode = 
model.getRiskCalculationMode() != null ? RiskCalculationMode.valueOf(model.getRiskCalculationMode()) : null; + requestedMode = RiskCalculationMode.valueOf(input); + + return modelRiskCalculationMode == requestedMode; + + } catch (IllegalArgumentException e) { + return false; + } + } + + public boolean checkRiskLevelKey(String riskKey) { + return riLevels.containsKey(riskKey); + } - for (String misb : misbehaviours) { + /** Checks if all elements in the given list represent a valid misbehaviour + * set. + * + * This method iterates through the list of misbehaviour set identifiers + * and checks each one to determine if it corresponds to a valid + * misbehaviour set. + * + * @param misbehaviourSetList A list of misbehavour set short URIs as + * strings + * @return {@code true} if every identifier in the list corresponds to a valid + * misbehaviour set, otherwise {@code false}. + */ + public boolean checkMisbehaviourList(List misbehaviourSetList) { + + for (String misb : misbehaviourSetList) { if (!this.isMisbehaviourSet(misb)) { logger.warn("failed to identify MS: {}", misb); - retVal = false; - break; + return false; } } - return retVal; + return true; + } + + public AssetDTO fillAssetDTO(String assetUri) { + AssetDB asset = assets.get(assetUri); + AssetDTO assetDTO = new AssetDTO(); + assetDTO.setUri(asset.getUri()); + assetDTO.setType(asset.getType()); + assetDTO.setLabel(asset.getLabel()); + assetDTO.setIdentifier(asset.getId()); + return assetDTO; + } + + public ControlDTO fillControlDTO(String csUri) { + ControlDTO ctrl = new ControlDTO(); + ControlSetDB cs = controlSets.get(csUri); + ControlDB control = querier.getControl(cs.getControl(), "domain"); + + ctrl.setUri(csUri); + ctrl.setLabel(control.getLabel()); + ctrl.setDescription(control.getDescription()); + ctrl.setAsset(fillAssetDTO(cs.getLocatedAt())); + ctrl.setAction("Enable control"); + + return ctrl; + } + + public void changeCS(Set csSet, boolean proposed) { + logger.info("changeCS list ({} {}): {}", proposed ? 
"enabling" : "disabling", csSet.size(), csSet); + + for (String csURIa : csSet) { + + logger.debug(" └──> {}", csURIa); + + Set csTriplet = QuerierUtils.getControlTriplet(csURIa); + + for (String csURI : csTriplet) { + logger.debug(" Set triplet {}: proposed -> {}", csURI, proposed); + querier.updateProposedStatus(proposed, csURI, "system"); + } + + } + + } + + public RiskVector calculateRisk(String modelId, RiskCalculationMode riskMode) throws RuntimeException { + try { + + RiskCalculator rc = new RiskCalculator(querier); + rc.calculateRiskLevels(riskMode, false, new Progress(modelId)); + + updateDatasets(); + + return getRiskVector(); + } catch (Exception e) { + logger.error("Error calculating risks for APD", e); + throw new RuntimeException("Failed to calculate risk", e); + } + + } + + public RiskVector getRiskVector() { + + Map riskVector = new HashMap<>(); + Collection rvRiskLevels = new ArrayList<>(); + for (LevelDB level : riLevels.values()) { + riskVector.put(level.getUri(), 0); + Level l = new Level(); + l.setValue(Integer.valueOf(level.getLevelValue())); + l.setUri(level.getUri()); + rvRiskLevels.add(l); + } + + for (MisbehaviourSetDB ms : misbehaviourSets.values()) { + riskVector.put(ms.getRisk(), riskVector.get(ms.getRisk()) + 1); + } + + return new RiskVector(rvRiskLevels, riskVector); + } + + public String validateRiskLevel(String uri) { + return uri; } /** - * capitilise string - * - * @param str - * @return + * Compare two risk levels specified by URI fragments + */ + public int compareRiskLevelURIs(String overallRiskA, String overallRiskB) { + logger.debug("Overall Risk Comparison: riskA({}) ? riskB({})", overallRiskA, overallRiskB); + + int levelA = riLevels.get(overallRiskA).getLevelValue(); + int levelB = riLevels.get(overallRiskB).getLevelValue(); + + // Compare levelA and levelB and return -1, 0, or 1 + return Integer.compare(levelA, levelB); + } + + /* + * Compare the risk levels of a list of misbehaviour sets with another single level */ - private String capitaliseString(String str) { - return str.substring(0, 1).toUpperCase() + str.substring(1); + public int compareMSListRiskLevel(List targetMSURIs, String otherRiskURI) { + int targetRiskLevel = riLevels.get(otherRiskURI).getLevelValue(); + int maxRiskLevel = 0; + + for (String msURI : targetMSURIs) { + int riskLevel = riLevels.get(misbehaviourSets.get(msURI).getRisk()).getLevelValue(); + if (riskLevel > maxRiskLevel) { + maxRiskLevel = riskLevel; + } + } + + return Integer.compare(maxRiskLevel, targetRiskLevel); } + public StateDTO getState() { + // state is risk + list of consequences + + Map riskVector = new HashMap<>(); + Collection rvRiskLevels = new ArrayList<>(); + for (LevelDB level : riLevels.values()) { + riskVector.put(level.getUri(), 0); + Level l = new Level(); + l.setValue(Integer.valueOf(level.getLevelValue())); + l.setUri(level.getUri()); + rvRiskLevels.add(l); + } + + List consequences = new ArrayList<>(); + for (MisbehaviourSetDB ms : misbehaviourSets.values()) { + + riskVector.put(ms.getRisk(), riskVector.get(ms.getRisk()) + 1); + int threshold = riLevels.get("domain#RiskLevelMedium").getLevelValue(); + if (riLevels.get(ms.getRisk()).getLevelValue() >= threshold) { + MisbehaviourDB msdb = querier.getMisbehaviour(ms.getMisbehaviour(), "domain"); + ConsequenceDTO consequence = new ConsequenceDTO(); + consequence.setLabel(msdb.getLabel().replaceAll("(? 
getAllCS() { + Set css = new HashSet<>(); + for (ControlSetDB cs : controlSets.values()) { + css.add(cs.getUri()); + } + return css; + } } diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackTree.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackTree.java index ec42477f..43ef5eea 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackTree.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/AttackTree.java @@ -29,6 +29,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -119,8 +120,10 @@ public AttackTree(List targetUris, boolean futureRisk, boolean shortestP this.backtrace(true); } else { /* - * If the shortest path is required then we get the URIRefs of the shortest path nodes from the first pass at the ThreatTree then discard all TreeNodes and - * create a new ThreatTree which is bounded by the shortest path URIRefs. + * If the shortest path is required then we get the URIRefs of the + * shortest path nodes from the first pass at the ThreatTree then + * discard all TreeNodes and create a new ThreatTree which is + * bounded by the shortest path URIRefs. */ logger.info("***********************"); logger.info("RUNNING FIRST backtrace"); @@ -128,7 +131,7 @@ public AttackTree(List targetUris, boolean futureRisk, boolean shortestP this.backtrace(false); - this.boundingUriRefs = new HashSet(); + this.boundingUriRefs = new HashSet<>(); for (AttackNode node : this.shortestPathNodes()) { this.boundingUriRefs.add(node.getUri()); } @@ -219,8 +222,6 @@ private Set shortestPathNodes() { * ones which have at least one child further away than the node, remove the others and iterate until no change. */ - // TODO: review this as it looks liek it's not quite working - Set spn = this.nodes().stream().collect(Collectors.toSet()); while (true) { Set goodNodes = new HashSet<>(); @@ -265,22 +266,10 @@ private Set nodes() { return filteredSet; } - /** - * Gets a list of all the AttackNodes in the AttackTree that are not in the error state, i.e. not not-a-cause. - * - * @return A list of all the AttackNodes in the AttackTree. 
- */ - private List excludedNodes() { - // Don't return the nodes that are the error state - List filteredList; - filteredList = this.nodeByUri.values().stream().filter(node -> node.getNotACause()) - .collect(Collectors.toList()); - return filteredList; - } private void addMaxDistanceFromTarget(String uriRef, List currentPath) { if (currentPath == null) { - currentPath = new ArrayList(); + currentPath = new ArrayList<>(); } List copyCP = new ArrayList<>(); @@ -314,25 +303,6 @@ private List uris() { return filteredList; } - private Set rootCauses() { - Set uriSet = new HashSet<>(); - for (AttackNode an : this.nodes()) { - if (an.isRootCause()) { - uriSet.add(an.getUri()); - } - } - return uriSet; - } - - private Set externalCauses() { - Set uriSet = new HashSet<>(); - for (AttackNode an : this.nodes()) { - if (an.isExternalCause()) { - uriSet.add(an.getUri()); - } - } - return uriSet; - } private Set normalOperations() { Set uriSet = new HashSet<>(); @@ -373,7 +343,7 @@ Set initialCauses() { private void followPath(String uri, List cPath, Map pathNodes) { if (cPath == null) { - cPath = new ArrayList(); + cPath = new ArrayList<>(); } cPath.add(uri); AttackNode cNode = this.nodeByUri.get(uri); @@ -413,7 +383,8 @@ public Graph createGraphDoc(Map fNodes, Set links) { List> treeLinks = new ArrayList<>(); // create nodes lists - for (String nodeUri : fNodes.keySet()) { + for (Iterator it = fNodes.keySet().iterator(); it.hasNext();) { + String nodeUri = it.next(); AttackNode node = this.nodeByUri.get(nodeUri); if (node.isThreat()) { threats.put(nodeUri, fNodes.get(nodeUri)); @@ -436,10 +407,8 @@ public Graph createGraphDoc(Map fNodes, Set links) { } } - Graph graph = new Graph(this.sortedMap(threats), this.sortedMap(misbehaviours), this.sortedMap(twas), + return new Graph(this.sortedMap(threats), this.sortedMap(misbehaviours), this.sortedMap(twas), treeLinks); - - return graph; } /** @@ -483,26 +452,17 @@ public TreeJsonDoc calculateTreeJsonDoc(boolean allPaths, boolean normalOp) { graphs.put(targetMS, graph); } - TreeJsonDoc treeJsonDoc = new TreeJsonDoc(graphs); - - return treeJsonDoc; - } - - private LogicalExpression attackMitigationCSG() { - List leList = new ArrayList<>(); - for (String uri : this.targetUris) { - leList.add(this.nodeByUri.get(uri).getControlStrategies()); - } - logger.debug("attackMitigationCSG LE size: {}", leList.size()); - return new LogicalExpression(this.apd, new ArrayList(leList), true); + return new TreeJsonDoc(graphs); } - private LogicalExpression attackMitigationCS() { + public LogicalExpression attackMitigationCSG() { List leList = new ArrayList<>(); for (String uri : this.targetUris) { - leList.add(this.nodeByUri.get(uri).getControls()); + leList.add(this.nodeByUri.get(uri).getAttackTreeMitigationCSG()); } - return new LogicalExpression(this.apd, new ArrayList(leList), true); + + logger.debug("attackMitigationCSG target uris: {}", this.targetUris); + return new LogicalExpression(new ArrayList(leList), true); } private Set createLinks(Set nodes) { @@ -534,9 +494,7 @@ private void setRank(String nodeUri, int rank) { this.rankByUri.put(nodeUri, ranks); } - if (ranks.contains(rank)) { - return; - } else { + if (!ranks.contains(rank)) { ranks.add(rank); for (String causeUri : this.nodeByUri.get(nodeUri).getDirectCauseUris()) { this.setRank(causeUri, rank + 1); @@ -571,6 +529,7 @@ public void stats() { logger.debug("CSGs...............: {}", csgs.size()); logger.debug("CS.................: {}", controls.size()); logger.info("#################################"); + } } 
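The following is a minimal, standalone sketch (not part of the patch) of the logical-expression manipulation that the classes below rely on. It uses the jbool_expressions library that LogicalExpression is built on (And, Or, Variable, RuleSet); the of(...) factory methods are the library's standard API and the CSG names are invented for illustration. It shows how an AND-of-ORs mitigation expression, of the kind produced by attackMitigationCSG(), is rewritten into DNF so that each OR term becomes one candidate set of CSGs for the recommendations search to enable and risk-test.

import com.bpodgursky.jbool_expressions.And;
import com.bpodgursky.jbool_expressions.Expression;
import com.bpodgursky.jbool_expressions.Or;
import com.bpodgursky.jbool_expressions.Variable;
import com.bpodgursky.jbool_expressions.rules.RuleSet;

public class CsgDnfSketch {
    public static void main(String[] args) {
        // Each target misbehaviour can be mitigated by one of several CSGs (OR) ...
        Expression<String> target1 = Or.of(Variable.of("system#CSG-PatchSoftware"),
                Variable.of("system#CSG-DisableService"));
        Expression<String> target2 = Or.of(Variable.of("system#CSG-PatchSoftware"),
                Variable.of("system#CSG-BlockAtFirewall"));

        // ... and every target must be mitigated (AND), as when all targets are required.
        Expression<String> mitigation = And.of(target1, target2);

        // DNF rewrites this as an OR of AND terms; each AND term is one
        // candidate set of CSGs to enable before re-running the risk calculation.
        Expression<String> dnf = RuleSet.toDNF(RuleSet.simplify(mitigation));
        System.out.println(dnf);
        // prints something like:
        // (system#CSG-PatchSoftware | (system#CSG-BlockAtFirewall & system#CSG-DisableService))
    }
}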
diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/CSGNode.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/CSGNode.java new file mode 100644 index 00000000..7a7050c9 --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/CSGNode.java @@ -0,0 +1,90 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-07-25 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.modelvalidator.attackpath; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.RecommendationDTO; + +public class CSGNode { + private List csgList; + private Set csList; + private List children; + private RecommendationDTO recommendation; + private int greaterEqualLess; + + public CSGNode() { + this(new ArrayList<>()); + } + + public CSGNode(List csgList) { + if (csgList == null) { + csgList = new ArrayList<>(); + } + this.csgList = csgList; + this.children = new ArrayList<>(); + this.recommendation = null; + this.csList = new HashSet<>(); + } + + public void addChild(CSGNode child) { + children.add(child); + } + + public Set getCsList() { + return this.csList; + } + + public List getCsgList() { + return this.csgList; + } + + public void setCsList(Set csList) { + this.csList = csList; + } + + public List getChildren() { + return this.children; + } + + public RecommendationDTO getRecommendation() { + return this.recommendation; + } + + public void setRecommendation(RecommendationDTO rec) { + this.recommendation = rec; + } + + public void setGreaterEqualLess(int val) { + greaterEqualLess = val; + } + + public int getGreaterEqualLess() { + return greaterEqualLess; + } +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/LogicalExpression.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/LogicalExpression.java index d0afb320..d49770b1 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/LogicalExpression.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/LogicalExpression.java @@ -36,35 +36,36 @@ import com.bpodgursky.jbool_expressions.And; import com.bpodgursky.jbool_expressions.Expression; import com.bpodgursky.jbool_expressions.Or; +import com.bpodgursky.jbool_expressions.Variable; import com.bpodgursky.jbool_expressions.rules.RuleSet; public class LogicalExpression 
{ - // private static final Logger logger = LoggerFactory.getLogger(AttackNode.class); + private static final Logger logger = LoggerFactory.getLogger(LogicalExpression.class); + + private static int instanceCount = 0; // Static counter variable private boolean allRequired; private List> allCauses = new ArrayList<>(); private Expression cause; - public LogicalExpression(AttackPathDataset ds, List cList, boolean ar) { + public LogicalExpression(List cList, boolean ar) { + + instanceCount++; this.allRequired = ar; - List> allCausesAux = new ArrayList<>(); for (Object causeObj : cList) { if (causeObj instanceof LogicalExpression) { LogicalExpression leObj = (LogicalExpression) causeObj; - allCausesAux.add(leObj.getCause()); + if (leObj.getCause() != null) { + allCauses.add(leObj.getCause()); + } } else { Expression exprObj = (Expression) causeObj; - allCausesAux.add(exprObj); - } - } - - // all_causes = [cc for cc in all_causes if cc is not None] - for (Expression cc : allCausesAux) { - if (cc != null) { - allCauses.add(cc); + if (exprObj != null) { + allCauses.add(exprObj); + } } } @@ -81,12 +82,11 @@ public LogicalExpression(AttackPathDataset ds, List cList, boolean ar) { this.cause = RuleSet.simplify(ors); } } - } public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("{"); + sb.append("LE{{"); Set uris = this.uris(); Iterator it = uris.iterator(); while (it.hasNext()) { @@ -95,7 +95,7 @@ public String toString() { sb.append(", "); } } - sb.append("}"); + sb.append("}}"); return sb.toString(); } @@ -111,11 +111,17 @@ public Expression getCause() { } } + /** + * Apply DNF to logical expression + * @param maxComplexity + */ public void applyDNF(int maxComplexity) { // apply DNF if (this.cause == null) { return; } + //TODO throw an exception if complexity is too high + // and caclulate complexity correctly. 
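// getChildren() returns only the immediate children of the root operator, so this count is a rough proxy for expression complexity (see the TODO above)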
int causeComplexity = this.cause.getChildren().size(); if (causeComplexity <= maxComplexity) { this.cause = RuleSet.toDNF(RuleSet.simplify(this.cause)); @@ -123,24 +129,106 @@ public void applyDNF(int maxComplexity) { } public Set uris() { - Set symbolSetUris = new HashSet(); + Set symbolSetUris = new HashSet<>(); if (this.cause != null) { for (Expression symbol : this.cause.getChildren()) { symbolSetUris.add(symbol.toString()); } if (symbolSetUris.isEmpty()) { + logger.debug("EMPTY URI"); symbolSetUris.add(this.cause.toString()); } } return symbolSetUris; } - public String getCsgComment(String dummyUri) { - if (!dummyUri.startsWith("system#")) { - dummyUri = "system#" + dummyUri; + + /** + * Get list of OR terms + * @return + */ + public List getListFromOr() { + List retVal = new ArrayList<>(); + if (this.cause == null) { + logger.warn("Logical Expression cause is none"); + } else if (this.cause instanceof Or) { + for (Expression expr : this.cause.getChildren()) { + retVal.add(expr); + } + } else if (this.cause instanceof And) { + logger.warn("Logical Expression cause is And when Or was expected"); + retVal.add(this.cause); + } else { + logger.warn("Logical Expression operator not supported: {}", this.cause); } - // MyControlStrategy myCSG = new MyControlStrategy("", "", ""); - return ""; + + return retVal; } + /** + * Extract AND terms from logical expression + * @param expression + * @return + */ + public static List getListFromAnd(Expression expression) { + List retVal = new ArrayList<>(); + + if (expression instanceof And) { + for (Object obj : expression.getChildren()) { + retVal.add((Variable)obj); + } + } else if (expression instanceof Variable) { + retVal.add((Variable)expression); + } else { + logger.warn("Logical Expression operator not supported: {}", expression); + } + return retVal; + } + + /** + * Display logical expression in terms of Variables + */ + public void displayExpression() { + logger.debug("CSG LogicalExpression has the following terms:"); + parse(this.cause, 0); + } + + /** + * Parse Expression terms + * @param expression + * @param depth + */ + private void parse(Expression expression, int depth) { + StringBuilder indent = new StringBuilder(); + for (int i = 0; i < depth; i++) { + indent.append(" "); + } + + if (expression instanceof And) { + // Handle the 'And' expression + And andExpression = (And) expression; + logger.debug("{} AND(#{}", indent.toString(), andExpression.getChildren().size()); + for (Expression subExpr : andExpression.getChildren()) { + parse(subExpr, depth + 1); // Recursive call + } + logger.debug("{} )", indent); + } else if (expression instanceof Or) { + // Handle the 'Or' expression + Or orExpression = (Or) expression; + logger.debug("{} OR(#{}", indent, orExpression.getChildren().size()); + for (Expression subExpr : orExpression.getChildren()) { + parse(subExpr, depth + 1); // Recursive call + } + logger.debug("{} )", indent); + } else if (expression instanceof Variable) { + // Handle the 'Variable' expression + Variable variableExpression = (Variable) expression; + // Display the variable, e.g., print it + logger.debug("{} {}", indent, variableExpression.getValue().substring(11)); + } else { + // Handle other types of expressions if any, we should not reach + // here!!! 
+ logger.warn("LE PARSER: unkown expression {}", expression); + } + } } diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/RecommendationsAlgorithm.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/RecommendationsAlgorithm.java new file mode 100644 index 00000000..1a58dcde --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/RecommendationsAlgorithm.java @@ -0,0 +1,625 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-01-24 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.modelvalidator.attackpath; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import java.time.LocalDateTime; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.springframework.stereotype.Component; + +import uk.ac.soton.itinnovation.security.model.system.RiskCalculationMode; +import uk.ac.soton.itinnovation.security.model.system.RiskVector; +import uk.ac.soton.itinnovation.security.modelquerier.IQuerierDB; +import uk.ac.soton.itinnovation.security.modelquerier.dto.ModelDB; +import uk.ac.soton.itinnovation.security.modelvalidator.Progress; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.ControlDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.ControlStrategyDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.RecommendationDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.RecommendationReportDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.StateDTO; + +import com.bpodgursky.jbool_expressions.Expression; +import com.bpodgursky.jbool_expressions.Variable; + +import uk.ac.soton.itinnovation.security.systemmodeller.mongodb.RecommendationRepository; +import uk.ac.soton.itinnovation.security.systemmodeller.attackpath.RecommendationsService.RecommendationJobState; +import uk.ac.soton.itinnovation.security.systemmodeller.model.RecommendationEntity; + +@Component +public class RecommendationsAlgorithm { + + private static final Logger logger = LoggerFactory.getLogger(RecommendationsAlgorithm.class); + + private AttackPathDataset apd; + private IQuerierDB querier; + private String modelId; + private int recCounter = 0; + private RecommendationReportDTO report; + private String riskMode = "CURRENT"; + 
private String acceptableRiskLevel; + private List targetMS; + private RiskVector initialRiskVector; + private boolean localSearch; + private boolean abortFlag = false; + private RecommendationRepository recRepository; + private String jobId; + private RecommendationJobState finalState; + + // allPaths flag for single or double backtrace + private boolean shortestPath = true; + + // used to implement timeout + private Integer maxSecs; + private long maxEndTime; + + public RecommendationsAlgorithm(RecommendationsAlgorithmConfig config, Integer maxSecs) { + this.querier = config.getQuerier(); + this.modelId = config.getModelId(); + this.riskMode = config.getRiskMode(); + this.acceptableRiskLevel = config.getAcceptableRiskLevel(); + this.targetMS = config.getTargetMS(); + this.report = new RecommendationReportDTO(); + this.localSearch = config.getLocalSearch(); + this.maxSecs = maxSecs; + + initializeAttackPathDataset(); + } + + private void initializeAttackPathDataset() { + logger.debug("Preparing datasets ..."); + + apd = new AttackPathDataset(querier); + } + + public void setRecRepository(RecommendationRepository recRepository, String job) { + this.recRepository = recRepository; + this.jobId = job; + } + + public void setAbortFlag() { + this.abortFlag = true; + } + + public RecommendationJobState getFinalState() { + return finalState; + } + + /** + * Check risk calculation mode is the same as the requested one + * @param input + * @return + */ + public boolean checkRiskCalculationMode(String input) { + ModelDB model = querier.getModelInfo("system"); + logger.info("Model info: {}", model); + + RiskCalculationMode modelRiskCalculationMode; + RiskCalculationMode requestedMode; + + try { + logger.info("riskCalculationMode: {}", model.getRiskCalculationMode()); + modelRiskCalculationMode = model.getRiskCalculationMode() != null ? 
RiskCalculationMode.valueOf(model.getRiskCalculationMode()) : null; + requestedMode = RiskCalculationMode.valueOf(input); + + return modelRiskCalculationMode == requestedMode; + + } catch (IllegalArgumentException e) { + return false; + } + } + + /** + * wrapper method for check existing risk calculation mode + * @param requestedRiskMode + */ + public void checkRequestedRiskCalculationMode(String requestedRiskMode) { + if (!checkRiskCalculationMode(requestedRiskMode)) { + logger.debug("mismatch between the stored risk calculation mode and the requested one"); + throw new RuntimeException("mismatch between the stored risk calculation mode and the requested one"); + } + } + + /** + * Calculate the attack tree + * @return the attack graph + */ + private AttackTree calculateAttackTree() { + if (!targetMS.isEmpty()) { + logger.debug("caclulate attack tree using MS list: {}", targetMS); + return calculateAttackTree(targetMS); + } else { + logger.debug("caclulate attack tree using acceptable risk level: {}", acceptableRiskLevel); + return calculateAttackTree(apd.filterMisbehavioursByRiskLevel(acceptableRiskLevel)); + } + } + + /** + * Calculate the attack tree + * @param targetUris + * @return the attack graph + * @throws RuntimeException + */ + private AttackTree calculateAttackTree(List targetUris) throws RuntimeException { + logger.debug("calculate attack tree with isFUTURE: {}, shortestPath: {}", riskMode, shortestPath); + logger.debug("target URIs: {}", targetUris); + + boolean isFutureRisk = apd.isFutureRisk(riskMode); + AttackTree attackTree = null; + + try { + final long startTime = System.currentTimeMillis(); + attackTree = new AttackTree(targetUris, isFutureRisk, shortestPath, apd); + attackTree.stats(); + final long endTime = System.currentTimeMillis(); + logger.info("AttackPathAlgorithm.calculateAttackTree: execution time {} ms", endTime - startTime); + } catch (Exception e) { + throw new RuntimeException(e); + } + + return attackTree; + } + + private RiskVector processOption(List csgList, Set csSet, CSGNode childNode) { + + // Calculate risk, and create a potential recommendation + RiskVector riskResponse = null; + RecommendationDTO recommendation = null; + try { + riskResponse = apd.calculateRisk(modelId, RiskCalculationMode.valueOf(riskMode)); + logger.debug("Risk calculation response: {}", riskResponse); + logger.debug("Overall model risk: {}", riskResponse.getOverall()); + StateDTO state = apd.getState(); + + recommendation = createRecommendation(csgList, csSet, state); + + if (report.getRecommendations() == null) { + report.setRecommendations(new ArrayList<>()); + } + + // store recommendation to node + childNode.setRecommendation(recommendation); + + // flag this recommendation if there is risk reduction + childNode.setGreaterEqualLess(initialRiskVector.compareTo(riskResponse)); + + } catch (Exception e) { + logger.error("failed to get risk calculation, restore model"); + + // restore model ... 
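// the risk calculation failed, so disable the control sets that were enabled for this option and leave the model as it was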
+ apd.changeCS(csSet, false); + + // raise exception since failed to run risk calculation + throw new RuntimeException(e); + } + + return riskResponse; + } + + private Set extractCS(List csgList) { + + Set csSet = new HashSet<>(); + for (String csg : csgList) { + for (String cs : apd.getCsgInactiveControlSets(csg)) { + csSet.add(cs); + } + } + + logger.debug("CS set for LE CSG_option {}", csgList); + logger.debug(" └──> {}", csSet); + + return csSet; + } + + private void updateJobState(RecommendationJobState newState) { + // get job status: + Optional optionalRec = recRepository.findById(jobId); + logger.debug("updating job status: {}", optionalRec); + optionalRec.ifPresent(rec -> { + rec.setState(newState); + rec.setModifiedAt(LocalDateTime.now()); + recRepository.save(rec); + }); + } + + private boolean checkJobAborted(){ + // get job status: + Optional jobState = recRepository.findById(jobId).map(RecommendationEntity::getState); + logger.debug("APPLY CSG: check task status: {}", jobState); + if (jobState.isPresent() && jobState.get() == RecommendationJobState.ABORTED) { + logger.debug("APPLY CSG: Got job status, cancelling this task"); + this.finalState = RecommendationJobState.ABORTED; + setAbortFlag(); + } + return abortFlag; + } + + private boolean checkJobTimedOut() { + boolean timedOut = System.currentTimeMillis() > this.maxEndTime; + if (timedOut) { + logger.warn("JOB TIMED OUT"); + this.finalState = RecommendationJobState.TIMED_OUT; + } + else { + logger.debug("JOB NOT YET TIMED OUT"); + } + return timedOut; + } + + private CSGNode applyCSGs(LogicalExpression le) { + CSGNode node = new CSGNode(); + return applyCSGs(le, node, "", apd.getRiskVector()); + } + + /** + * Build CSG recommendations tree. + * The method is recursive and will create a tree of CSG options. + * @param le + * @param myNode + * @param parentStep + * @param parentRiskVector + * @return + */ + private CSGNode applyCSGs(LogicalExpression le, CSGNode myNode, String parentStep, RiskVector parentRiskVector) { + logger.debug("applyCSGs() with parentStep: {}", parentStep); + + // convert LE to DNF + le.applyDNF(300); + + // convert from CSG logical expression to list of CSG options + List csgOptions = le.getListFromOr(); + + logger.debug("Derived DNF (OR) CSG expressions: {} (options).", csgOptions.size()); + for (Expression csgOption : csgOptions) { + logger.debug(" └──> {}", csgOption); + } + + // examine CSG options + int csgOptionCounter = 0; + for (Expression csgOption : csgOptions) { + + // avoid checking job state if jobId is not defined + if (jobId != null && !jobId.isEmpty()) { + // check if job is aborted or timed out: + if (checkJobAborted() || checkJobTimedOut()) { + break; + } else { + updateJobState(RecommendationJobState.RUNNING); + } + } + + csgOptionCounter += 1; + String myStep = String.format("%s%d/%d", parentStep.equals("") ? "" : parentStep + "-", csgOptionCounter, csgOptions.size()); + logger.debug("examining CSG LE option {}: {}", myStep, csgOption); + + List options = LogicalExpression.getListFromAnd(csgOption); + + List csgList = new ArrayList<>(); + + for (Variable va : options) { + csgList.add(va.toString()); + } + logger.debug("CSG flattened list ({}): {}", csgList.size(), csgList); + + CSGNode childNode = new CSGNode(csgList); + myNode.addChild(childNode); + + // get available CS + Set csSet = extractCS(csgList); + + // store CS set in the node to reconstruct the final CS list + // correctly in the Recommendation report for nested iterations. 
+ childNode.setCsList(csSet); + + logger.debug("CS set for LE CSG_option {}", csgOption); + logger.debug(" └──> {}", csSet); + + // apply all CS in the CS_set + if (csSet.isEmpty()) { + logger.warn("EMPTY csSet is found, skipping this CSG option"); + continue; + } + apd.changeCS(csSet, true); + + // Re-calculate risk now and create a potential recommendation + RiskVector riskResponse = processOption(csgList, csSet, childNode); + + // Check for success + // Finish if the maximum risk is below or equal to the acceptable risk level + // If we are constrained to some target MS, then we should only check the + // risk levels of the targets (otherwise it is likely it will never finish) + + boolean globalRiskAcceptable = targetMS.isEmpty() && apd.compareRiskLevelURIs(riskResponse.getOverall(), acceptableRiskLevel) <= 0; + boolean targetedRiskAcceptable = !targetMS.isEmpty() && apd.compareMSListRiskLevel(targetMS, acceptableRiskLevel) <= 0; + + if (globalRiskAcceptable || targetedRiskAcceptable) { + logger.debug("Success termination condition reached for {}", myStep); + } else { + logger.debug("Risk is still higher than {}", acceptableRiskLevel); + + // Check if we should abort + // If doing localSearch then stop searching (fail) if the risk vector is higher than the parent + // In this way we do not let the risk vector increase. We could make this softer by comparing + // the "overall risk level", i.e. the highest risk level of the current and parent vector + + if (localSearch && (riskResponse.compareTo(parentRiskVector) > 0)) { + logger.debug("Risk level has increased. Abort branch {}", myStep); + } else { + + // Carry on searching by recursing into the next level + + logger.debug("Recalculate nested attack path tree"); + AttackTree nestedAttackTree = calculateAttackTree(); + LogicalExpression nestedLogicalExpression = nestedAttackTree.attackMitigationCSG(); + applyCSGs(nestedLogicalExpression, childNode, myStep, riskResponse); + } + } + + // undo CS changes in CS_set + logger.debug("Undo CS controls ({})", csSet.size()); + apd.changeCS(csSet, false); + logger.debug("Re-run risk calculation after CS changes have been revoked"); + // TODO: optimise this + // This does more work than is necessary as we are going to run the risk calculation again in the next iteration. + // The reason it is here is because of the side effect of the calculateRisk method which updates the various cached data. + apd.calculateRisk(modelId, RiskCalculationMode.valueOf(riskMode)); + + logger.debug("Finished examining CSG LE option {}: {}", myStep, csgOption); + } + + logger.debug("return from applyCSGs() iteration with parentStep: {}", parentStep); + + return myNode; + } + + /** + * Create control strategy DTO object + * @param csgUri + * @return + */ + private ControlStrategyDTO createControlStrategyDTO(String csgUri) { + ControlStrategyDTO csgDto = new ControlStrategyDTO(); + csgDto.setUri(csgUri); + csgDto.setDescription(apd.getCSGDescription(csgUri)); + csgDto.setCategory(csgUri.contains("-Runtime") ? 
"Applicable" : "Conditional"); + return csgDto; + } + + /** + * Crete CSG DTO object + * @param csgList + * @return + */ + private Set createCSGDTO(List csgList) { + Set recCSGSet = new HashSet<>(); + for (String csgUri : csgList) { + recCSGSet.add(createControlStrategyDTO(csgUri)); + } + return recCSGSet; + } + + /** + * create control DTO + * @param ctrlUri + * @return + */ + private ControlDTO createControlDTO(String ctrlUri) { + return apd.fillControlDTO(ctrlUri); + } + + /** + * Crete control set DTO + * @param csSet + * @return + */ + private Set createCSDTO(Set csSet) { + Set recControlSet = new HashSet<>(); + for (String ctrlUri : csSet) { + recControlSet.add(createControlDTO(ctrlUri)); + } + return recControlSet; + } + + /** + * Create recommendation DTO object + * @param csgList + * @param csSet + * @param state + * @return + */ + private RecommendationDTO createRecommendation(List csgList, Set csSet, StateDTO state) { + RecommendationDTO recommendation = new RecommendationDTO(); + recommendation.setIdentifier(recCounter++); + recommendation.setState(state); + logger.debug("Creating a potential recommendation ID: {}", recommendation.getIdentifier()); + + Set csgDTOs = createCSGDTO(csgList); + Set controlDTOs = createCSDTO(csSet); + + recommendation.setControlStrategies(csgDTOs); + recommendation.setControls(controlDTOs); + + // N.B. the list of CSG and CS will be updated later if the recommendation is nested. + return recommendation; + } + + /** + * Update recommendation DTO object + * @param recommendation + * @param csgList + * @param csSet + */ + private void updateRecommendation(RecommendationDTO recommendation, List csgList, Set csSet) { + Set csgDTOs = createCSGDTO(csgList); + Set controlDTOs = createCSDTO(csSet); + + recommendation.setControlStrategies(csgDTOs); + recommendation.setControls(controlDTOs); + } + + /** + * Parse the CSGNode tree and find recommendations + * @param node + */ + private void makeRecommendations(CSGNode node) { + List path = new ArrayList<>(); + makeRecommendations(node, path); + } + + /** + * Parse the CSGNode tree and find recommendations + * @param node + * @param path + */ + private void makeRecommendations(CSGNode node, List path) { + + // if path is undefined, initalise it as empty list + if (path == null) { + path = new ArrayList<>(); + } + + // Create a new instance of the path list for the current recursive call + List currentPath = new ArrayList<>(path); + currentPath.add(node); + + if (node.getChildren().isEmpty()) { + if ((node.getRecommendation() != null) & (node.getGreaterEqualLess() > 0)){ + Set csgSet = reconstructCSGs(currentPath); + Set csSet = reconstructCSs(currentPath); + updateRecommendation(node.getRecommendation(), new ArrayList<>(csgSet), csSet); + report.getRecommendations().add(node.getRecommendation()); + } else { + logger.debug("skipping recommendation: {}", node.getRecommendation()); + } + } else { + for (CSGNode child : node.getChildren()){ + makeRecommendations(child, currentPath); + } + } + } + + /** + * Reconstruct CSGs for nested recommendations + * @param nodeList + * @return + */ + private Set reconstructCSGs(List nodeList) { + Set csgSet = new HashSet<>(); + for (CSGNode node : nodeList) { + for (String csg : node.getCsgList()) { + csgSet.add(csg); + } + } + return csgSet; + } + + /** + * Reconstruct CS for nested recommendations + * @param nodeList + * @return + */ + private Set reconstructCSs(List nodeList) { + Set csSet = new HashSet<>(); + for (CSGNode node : nodeList) { + for (String cs : 
node.getCsList()) { + csSet.add(cs); + } + } + return csSet; + } + + /** + * Start recommendations algorithm + * @param progress + * @return + */ + public RecommendationReportDTO recommendations(Progress progress) { + + logger.info("Recommendations core part (risk mode: {})", riskMode); + logger.warn("Job timeout: {} secs", maxSecs); + + // Set start time for recommendations + long startTime = System.currentTimeMillis(); + + // Determine end time for recommendations (i.e. after which no further iterations will be completed) + if (maxSecs != null) { + this.maxEndTime = startTime + maxSecs * 1000; + } + else { + logger.warn("No recommendations.timeout.secs property set. Not setting timeout..."); + this.maxEndTime = Long.MAX_VALUE; + } + + try { + progress.updateProgress(0.1, "Getting initial risk state"); + // get initial risk state + initialRiskVector = apd.calculateRisk(modelId, RiskCalculationMode.valueOf(riskMode)); + + StateDTO state = apd.getState(); + report.setCurrent(state); + + progress.updateProgress(0.2, "Calculating attack tree"); + AttackTree threatTree = calculateAttackTree(); + + // step: attackMitigationCSG? + LogicalExpression attackMitigationCSG = threatTree.attackMitigationCSG(); + attackMitigationCSG.displayExpression(); + + // step: rootNode? + progress.updateProgress(0.3, "Trying different control strategy options"); + CSGNode rootNode = applyCSGs(attackMitigationCSG); + + // step: makeRecommendations on rootNode? + logger.debug("MAKE RECOMMENDATIONS"); + progress.updateProgress(0.8, "Making recommendations"); + makeRecommendations(rootNode); + + progress.updateProgress(0.9, "Preparing report"); + List recommendations = report.getRecommendations() != null ? report.getRecommendations() : Collections.emptyList(); + logger.info("The Recommendations Report has: {} recommendations", recommendations.size()); + for (RecommendationDTO rec : recommendations) { + logger.debug(" recommendation: {}", rec.getState().getRisk()); + for (ControlStrategyDTO csgDTO : rec.getControlStrategies()) { + logger.debug(" └──> csgs: {}", csgDTO.getUri().substring(7)); + } + } + + } catch (Exception e) { + throw new RuntimeException(e); + } + + return report; + } + +} + diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/RecommendationsAlgorithmConfig.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/RecommendationsAlgorithmConfig.java new file mode 100644 index 00000000..a7c4b4da --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/RecommendationsAlgorithmConfig.java @@ -0,0 +1,100 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. 
+// +// Created By: Panos Melas +// Created Date: 2023-07-25 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.modelvalidator.attackpath; + +import java.util.ArrayList; +import java.util.List; + +import uk.ac.soton.itinnovation.security.modelquerier.IQuerierDB; + +public class RecommendationsAlgorithmConfig { + private IQuerierDB querier; + private String modelId; + private String riskMode; + private String acceptableRiskLevel; + private List targetMS; + private boolean localSearch; + + public RecommendationsAlgorithmConfig(IQuerierDB querier, String modelId, String riskMode, boolean localSearch, String level, List targets) { + this.querier = querier; + this.modelId = modelId; + this.riskMode = riskMode; + this.acceptableRiskLevel = level; + if (targets == null) { + this.targetMS = new ArrayList<>(); + } else { + this.targetMS = targets; + } + this.localSearch = localSearch; + } + + public IQuerierDB getQuerier() { + return querier; + } + + public String getModelId() { + return modelId; + } + + public String getRiskMode() { + return riskMode; + } + + public void setQuerier(IQuerierDB querier) { + this.querier = querier; + } + + public void setModelId(String modelId) { + this.modelId = modelId; + } + + public void setRiskMode(String riskMode) { + this.riskMode = riskMode; + } + + public String getAcceptableRiskLevel() { + return this.acceptableRiskLevel; + } + + public void setAcceptableRiskLevel(String level) { + acceptableRiskLevel = level; + } + + public List getTargetMS() { + return this.targetMS; + } + + public void setTargetMS(List targets) { + this.targetMS = targets; + } + + public Boolean getLocalSearch() { + return this.localSearch; + } + + public void setLocalSearch(Boolean flag) { + this.localSearch = flag; + } +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/RiskVectorDB.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/RiskVectorDB.java new file mode 100644 index 00000000..8b35eeaf --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/RiskVectorDB.java @@ -0,0 +1,122 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. 
+// +// Created By: Panos Melas +// Created Date: 2023-09-01 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.modelvalidator.attackpath.dto; + +import java.util.List; +import java.util.Map; + +public class RiskVectorDB implements Comparable { + private int veryHigh; + private int high; + private int medium; + private int low; + private int veryLow; + + public RiskVectorDB() { + veryHigh = 0; + high = 0; + medium = 0; + low = 0; + veryLow = 0; + } + + public RiskVectorDB(int vHigh, int high, int med, int low, int vLow) { + this.veryHigh = vHigh; + this.high = high; + this.medium = med; + this.low = low; + this.veryLow = vLow; + } + + @Override + public int compareTo(RiskVectorDB other) { + if (this.equals(other)) { + return 0; + } else if (this.greaterThan(other)) { + return 1; + } else if (this.lessThan(other)) { + return -1; + } else { + // it should not happen? + return -2; + } + } + + + public boolean equals(RiskVectorDB other) { + if (this == other) return true; + if (other == null || getClass() != other.getClass()) return false; + return low == other.low && + veryLow == other.veryLow && + medium == other.medium && + high == other.high && + veryHigh == other.veryHigh; + } + + public boolean greaterThan(RiskVectorDB other) { + if (veryHigh - other.veryHigh > 0) { + return true; + } else if (veryHigh - other.veryHigh < 0) { + return false; + } else if (high - other.high > 0) { + return true; + } else if (high - other.high < 0) { + return false; + } else if (medium - other.medium > 0) { + return true; + } else if (medium - other.medium < 0) { + return false; + } else if (low - other.low > 0) { + return true; + } else if (low - other.low < 0) { + return false; + } else { + return veryLow - other.veryLow > 0; + } + } + + public boolean lessThan(RiskVectorDB other) { + if (veryHigh - other.veryHigh < 0) { + return true; + } else if (veryHigh - other.veryHigh > 0) { + return false; + } else if (high - other.high < 0) { + return true; + } else if (high - other.high > 0) { + return false; + } else if (medium - other.medium < 0) { + return true; + } else if (medium - other.medium > 0) { + return false; + } else if (low - other.low < 0) { + return true; + } else if (low - other.low > 0) { + return false; + } else { + return veryLow - other.veryLow < 0; + } + } + +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/TreeJsonDoc.java b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/TreeJsonDoc.java index 3fefdd8e..7e6991d4 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/TreeJsonDoc.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/modelvalidator/attackpath/dto/TreeJsonDoc.java @@ -28,7 +28,7 @@ public class TreeJsonDoc { // TODO: should pass it as a parameter - private static final String uriPrefix = "http://it-innovation.soton.ac.uk/ontologies/trustworthiness/"; + private static final String URI_PREFIX = "http://it-innovation.soton.ac.uk/ontologies/trustworthiness/"; private Map graphs; public TreeJsonDoc(Map graphs) { @@ -36,7 +36,7 @@ public TreeJsonDoc(Map graphs) { } public String getUriPrefix() { - return uriPrefix; + return URI_PREFIX; } public Map getGraphs() { diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/SystemModellerApplication.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/SystemModellerApplication.java 
index 2e95627d..69ef0955 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/SystemModellerApplication.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/SystemModellerApplication.java @@ -34,11 +34,13 @@ import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.Configuration; +import org.springframework.scheduling.annotation.EnableAsync; @Configuration @EnableAutoConfiguration @ComponentScan @SpringBootApplication +@EnableAsync public class SystemModellerApplication extends SpringBootServletInitializer{ @Override protected SpringApplicationBuilder configure(SpringApplicationBuilder application) { diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/attackpath/RecommendationsService.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/attackpath/RecommendationsService.java new file mode 100644 index 00000000..b2f04abb --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/attackpath/RecommendationsService.java @@ -0,0 +1,158 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. 
+// +// Created By: Panos Melas +// Created Date: 2023-01-24 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.attackpath; + +import org.springframework.stereotype.Service; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.LocalDateTime; +import java.util.Optional; + +import uk.ac.soton.itinnovation.security.modelvalidator.Progress; +import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.RecommendationsAlgorithm; +import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.RecommendationsAlgorithmConfig; +import uk.ac.soton.itinnovation.security.systemmodeller.model.RecommendationEntity; +import uk.ac.soton.itinnovation.security.systemmodeller.mongodb.RecommendationRepository; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.RecommendationReportDTO; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.RiskModeMismatchException; + +@Service +public class RecommendationsService { + + private static final Logger logger = LoggerFactory.getLogger(RecommendationsService.class); + + @Autowired + private RecommendationRepository recRepository; + + @Value("${recommendations.timeout.secs: 900}") + private Integer recommendationsTimeoutSecs; + + public void startRecommendationTask(String jobId, RecommendationsAlgorithmConfig config, Progress progress) { + + logger.debug("startRecommendationTask for {}", jobId); + logger.debug("recommendationsTimeoutSecs: {}", this.recommendationsTimeoutSecs); + + // create recEntry and save it to mongo db + RecommendationEntity recEntity = new RecommendationEntity(); + recEntity.setId(jobId); + recEntity.setModelId(config.getModelId()); + recEntity.setState(RecommendationJobState.STARTED); + recRepository.save(recEntity); + logger.debug("rec entity saved for {}", recEntity.getId()); + + try { + RecommendationsAlgorithm reca = new RecommendationsAlgorithm(config, recommendationsTimeoutSecs); + + if (!reca.checkRiskCalculationMode(config.getRiskMode())) { + throw new RiskModeMismatchException(); + } + + reca.setRecRepository(recRepository, jobId); + + RecommendationReportDTO report = reca.recommendations(progress); + + storeRecReport(jobId, report); + + RecommendationJobState finalState = reca.getFinalState() != null ? 
reca.getFinalState() : RecommendationJobState.FINISHED; + updateRecommendationJobState(jobId, finalState); + } catch (Exception e) { + updateRecommendationJobState(jobId, RecommendationJobState.FAILED); + } + } + + public void updateRecommendationJobState(String recId, RecommendationJobState newState, String msg) { + Optional optionalRec = recRepository.findById(recId); + optionalRec.ifPresent(rec -> { + rec.setState(newState); + rec.setMessage(msg); + rec.setModifiedAt(LocalDateTime.now()); + recRepository.save(rec); + }); + } + + public void updateRecommendationJobState(String recId, RecommendationJobState newState) { + Optional optionalRec = recRepository.findById(recId); + optionalRec.ifPresent(rec -> { + rec.setState(newState); + rec.setModifiedAt(LocalDateTime.now()); + recRepository.save(rec); + }); + } + + public void storeRecReport(String jobId, RecommendationReportDTO report) { + Optional optionalRec = recRepository.findById(jobId); + optionalRec.ifPresent(job -> { + job.setReport(report); + job.setState(RecommendationJobState.FINISHED); + job.setModifiedAt(LocalDateTime.now()); + recRepository.save(job); + }); + } + + public Optional getRecommendationJobState(String jobId) { + return recRepository.findById(jobId).map(RecommendationEntity::getState); + } + + public Optional getRecommendationJobMessage(String jobId) { + return recRepository.findById(jobId).map(RecommendationEntity::getMessage); + } + + + public Optional getRecReport(String jobId) { + return recRepository.findById(jobId).map(RecommendationEntity::getReport); + } + + public Optional getJobById(String jobId) { + return recRepository.findById(jobId); + } + + // TODO: the happy path for a job should be: created -> started-> running + // -> finished. + // For cancelled jobs the sequence should be: created -> started -> running + // -> aborted -> finished? + // + // Somehow the recommendation report should indicate what has happened to + // the job if it was cancelled, because cancelled jobs should still produce + // results. + // + // Then use the RecommendationEntity to store additional information, eg + // number of recommendations found. 
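An illustrative sketch of the lifecycle discussed in the TODO above: the happy path is CREATED -> STARTED -> RUNNING -> FINISHED, with FAILED, ABORTED and TIMED_OUT as terminal branches (startRecommendationTask takes the final state from the algorithm's getFinalState() when one is set, and otherwise falls back to FINISHED). The helper below is hypothetical and is not part of this changeset; it only shows how a caller of RecommendationsService might test whether a job has reached a terminal state.

    // Hypothetical helper, shown only as a sketch of the job state machine described above.
    private static boolean isTerminalState(RecommendationJobState state) {
        switch (state) {
            case FINISHED:
            case FAILED:
            case ABORTED:
            case TIMED_OUT:
                return true;
            default:
                return false;
        }
    }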
+ // + public enum RecommendationJobState { + CREATED, + STARTED, + RUNNING, + FAILED, + FINISHED, + ABORTED, + TIMED_OUT, + UNKNOWN + } + +} + diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/model/RecommendationEntity.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/model/RecommendationEntity.java new file mode 100644 index 00000000..9dcbe5c8 --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/model/RecommendationEntity.java @@ -0,0 +1,66 @@ +package uk.ac.soton.itinnovation.security.systemmodeller.model; + +import org.springframework.data.annotation.CreatedDate; +import org.springframework.data.annotation.LastModifiedDate; +import org.springframework.data.annotation.Id; +import org.springframework.data.mongodb.core.mapping.Document; + +import java.time.LocalDateTime; + +import uk.ac.soton.itinnovation.security.systemmodeller.attackpath.RecommendationsService.RecommendationJobState; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.RecommendationReportDTO; + +@Document(collection = "recommendations") +public class RecommendationEntity { + + @Id + private String id; + private RecommendationReportDTO report; + private RecommendationJobState state; + private String message; + private String modelId; + private int validRec = 0; + private int totalReci = 0; + + @CreatedDate + private LocalDateTime createdAt; + + @LastModifiedDate + private LocalDateTime modifiedAt; + + // getters and setters + public void setState(RecommendationJobState state) { + this.state = state; + } + public RecommendationJobState getState() { + return this.state; + } + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + public void setReport(RecommendationReportDTO report) { + this.report = report; + } + public RecommendationReportDTO getReport() { + return this.report; + } + public void setModifiedAt(LocalDateTime modifiedAt) { + this.modifiedAt = modifiedAt; + } + public void setMessage(String msg) { + this.message = msg; + } + public String getMessage() { + return this.message; + } + public void setModelId(String modelId) { + this.modelId = modelId; + } + public String getModelId() { + return this.modelId; + } +} + diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/mongodb/RecommendationRepository.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/mongodb/RecommendationRepository.java new file mode 100644 index 00000000..c756d4f5 --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/mongodb/RecommendationRepository.java @@ -0,0 +1,10 @@ +package uk.ac.soton.itinnovation.security.systemmodeller.mongodb; + +import org.springframework.data.mongodb.repository.MongoRepository; + +import uk.ac.soton.itinnovation.security.systemmodeller.model.RecommendationEntity; + +public interface RecommendationRepository extends MongoRepository { + public RecommendationEntity findOneById(String id); +} + diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/AssetController.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/AssetController.java index f125614f..5114b422 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/AssetController.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/AssetController.java @@ -51,8 +51,6 @@ import uk.ac.soton.itinnovation.security.model.system.Asset; import 
uk.ac.soton.itinnovation.security.model.system.AssetGroup; -import uk.ac.soton.itinnovation.security.model.system.ComplianceSet; -import uk.ac.soton.itinnovation.security.model.system.ComplianceThreat; import uk.ac.soton.itinnovation.security.model.system.ControlSet; import uk.ac.soton.itinnovation.security.model.system.MetadataPair; import uk.ac.soton.itinnovation.security.model.system.Relation; @@ -63,7 +61,6 @@ import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.ControlsAndThreatsResponse; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.CreateAssetResponse; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.DeleteAssetResponse; -import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.DeleteRelationResponse; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.UpdateAsset; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.UpdateAssetCardinality; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.UpdateAssetResponse; diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/DomainModelController.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/DomainModelController.java index 7b76fc68..4ba09876 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/DomainModelController.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/DomainModelController.java @@ -92,6 +92,9 @@ public Map> getDomainModels(){ String username = keycloakAdminClient.getCurrentUser().getUsername(); logger.debug("Getting domain models for: {}", username); + /* The user access control system for domain models is not working correctly and is not currently a required feature. + Therefore, the list of domain models returned for any user will be all domain models, for the time being. See issue #161 + // If this is the user's first login we need to add them to // the management graph and set their default domain model // access. 
@@ -105,6 +108,9 @@ public Map> getDomainModels(){ logger.debug("{}", x); return x; } + */ + + return storeModelManager.getDomainModels(); } @RequestMapping(value = "/upload", method = RequestMethod.POST) diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/EntityController.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/EntityController.java index 7b012b78..eb0b7ed3 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/EntityController.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/EntityController.java @@ -92,8 +92,6 @@ public ResponseEntity getEntitySystemThreat(@PathVariable String model AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -134,8 +132,6 @@ public ResponseEntity> getEntitySystemThreats(@PathVariabl AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -174,8 +170,6 @@ public ResponseEntity getEntitySystemMisbehaviourSet(@PathVar AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -218,8 +212,6 @@ public ResponseEntity> getEntitySystemMisbehaviou AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -267,8 +259,6 @@ public ResponseEntity getEntitySystemControlStrategy(@PathVar AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -309,8 +299,6 @@ public ResponseEntity> getEntitySystemControlStra AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -349,8 +337,6 @@ public ResponseEntity getEntitySystemControlSet(@PathVariable Stri AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -391,8 +377,6 @@ public ResponseEntity> getEntitySystemControlSets(@Pat AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -431,8 +415,6 @@ public ResponseEntity getEntitySystemAsset(@PathVariable String modelId AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -472,8 +454,6 @@ public ResponseEntity> getEntitySystemAssets(@PathVariable AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - 
JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -512,8 +492,6 @@ public ResponseEntity getEntitySystemTWAS(@PathVa AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -554,8 +532,6 @@ public ResponseEntity> getEntitySyste AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -593,8 +569,6 @@ public ResponseEntity getEntityDomainTWA(@PathVariab AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -634,8 +608,6 @@ public ResponseEntity> getEntityDomainTW AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -673,8 +645,6 @@ public ResponseEntity getEntityDomainControl(@PathVariable String mod AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -714,8 +684,6 @@ public ResponseEntity> getEntityDomainControls(@PathVaria AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -754,8 +722,6 @@ public ResponseEntity getEntityDomainMisbehaviour(@PathVariable AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -796,8 +762,6 @@ public ResponseEntity> getEntityDomainMisbehaviours( AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -836,8 +800,6 @@ public ResponseEntity getEntityDomainLevel(@PathVariable String modelId AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -898,8 +860,6 @@ public ResponseEntity> getEntityDomainLevels(@PathVariable AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/ModelController.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/ModelController.java index f35377b2..f99f9ee9 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/ModelController.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/ModelController.java @@ -32,16 +32,18 @@ 
import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; +import java.net.URI; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.rmi.UnexpectedException; import java.text.SimpleDateFormat; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.Executors; @@ -54,8 +56,6 @@ import javax.naming.SizeLimitExceededException; import javax.servlet.http.HttpServletRequest; -//import org.apache.jena.query.Dataset; - import org.keycloak.representations.idm.UserRepresentation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -67,6 +67,8 @@ import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; @@ -87,6 +89,9 @@ import uk.ac.soton.itinnovation.security.modelvalidator.ModelValidator; import uk.ac.soton.itinnovation.security.modelvalidator.Progress; import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.AttackPathAlgorithm; +import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.AttackPathDataset; +import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.RecommendationsAlgorithm; +import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.RecommendationsAlgorithmConfig; import uk.ac.soton.itinnovation.security.modelvalidator.attackpath.dto.TreeJsonDoc; import uk.ac.soton.itinnovation.security.semanticstore.AStoreWrapper; import uk.ac.soton.itinnovation.security.semanticstore.IStoreWrapper; @@ -97,23 +102,31 @@ import uk.ac.soton.itinnovation.security.systemmodeller.model.ModelFactory; import uk.ac.soton.itinnovation.security.systemmodeller.model.WebKeyRole; import uk.ac.soton.itinnovation.security.systemmodeller.mongodb.IModelRepository; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.JobResponseDTO; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.LoadingProgress; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.LoadingProgressResponse; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.ModelDTO; import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.UpdateModelResponse; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.RecommendationReportDTO; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.BadRequestErrorException; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.BadRiskModeException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.InternalServerErrorException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.MisbehaviourSetInvalidException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.ModelException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.ModelInvalidException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.NotAcceptableErrorException; import 
uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.NotFoundErrorException; +import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.RiskModeMismatchException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.UnprocessableEntityException; import uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions.UserForbiddenFromDomainException; import uk.ac.soton.itinnovation.security.systemmodeller.semantics.ModelObjectsHelper; import uk.ac.soton.itinnovation.security.systemmodeller.semantics.StoreModelManager; import uk.ac.soton.itinnovation.security.systemmodeller.util.ReportGenerator; import uk.ac.soton.itinnovation.security.systemmodeller.util.SecureUrlHelper; +import uk.ac.soton.itinnovation.security.systemmodeller.model.RecommendationEntity; +import uk.ac.soton.itinnovation.security.systemmodeller.mongodb.RecommendationRepository; +import uk.ac.soton.itinnovation.security.systemmodeller.attackpath.RecommendationsService; +import uk.ac.soton.itinnovation.security.systemmodeller.attackpath.RecommendationsService.RecommendationJobState; /** * Includes all operations of the Model Controller Service. @@ -141,12 +154,23 @@ public class ModelController { @Autowired private SecureUrlHelper secureUrlHelper; + @Autowired + private RecommendationsService recommendationsService; + + @Autowired + private RecommendationRepository recRepository; + @Value("${admin-role}") public String adminRole; @Value("${knowledgebases.install.folder}") private String kbInstallFolder; + private static final String VALIDATION = "Validation"; + private static final String RISK_CALCULATION = "Risk calculation"; + private static final String RECOMMENDATIONS = "Recommendations"; + private static final String STARTING = "starting"; + /** * Take the user IDs of the model owner, editor and modifier and look up the current username for them */ @@ -246,6 +270,14 @@ private Set getModelObjectsForUser(UserRepresentation user){ return models; } + private void warnIsValidating(String modelId, String modelWebkey) { + logger.warn("Model {} is currently validating - ignoring request {}", modelId, modelWebkey); + } + + private void warnIsCalculatingRisks(String modelId, String modelWebkey) { + logger.warn("Model {} is already calculating risks - ignoring request {}", modelId, modelWebkey); + } + /** * Returns a list of models for the current user. 
* @@ -633,17 +665,17 @@ public ResponseEntity validateModel(@PathVariable String modelWriteId) t String modelId = model.getId(); if (model.isValidating()) { - logger.warn("Model {} is already validating - ignoring request {}", modelId, modelWriteId); + warnIsValidating(modelId, modelWriteId); return new ResponseEntity<>(HttpStatus.ACCEPTED); } if (model.isCalculatingRisks()) { - logger.warn("Model {} is already calculating risks - ignoring request {}", modelId, modelWriteId); + warnIsCalculatingRisks(modelId, modelWriteId); return new ResponseEntity<>(HttpStatus.ACCEPTED); } Progress validationProgress = modelObjectsHelper.getValidationProgressOfModel(model); - validationProgress.updateProgress(0d, "Validation starting"); + validationProgress.updateProgress(0d, VALIDATION + " " + STARTING); logger.debug("Marking as validating model [{}] {}", modelId, model.getName()); model.markAsValidating(); @@ -677,7 +709,7 @@ public ResponseEntity validateModel(@PathVariable String modelWriteId) t return true; }, 0, TimeUnit.SECONDS); - modelObjectsHelper.registerValidationExecution(modelId, future); + modelObjectsHelper.registerTaskExecution(modelId, future); return new ResponseEntity<>(HttpStatus.ACCEPTED); } @@ -721,12 +753,12 @@ public ResponseEntity calculateRisks(@PathVariable String modelWriteId, String modelId = model.getId(); if (model.isValidating()) { - logger.warn("Model {} is currently validating - ignoring calc risks request {}", modelId, modelWriteId); + warnIsValidating(modelId, modelWriteId); return new ResponseEntity<>(HttpStatus.ACCEPTED); } if (model.isCalculatingRisks()) { - logger.warn("Model {} is already calculating risks - ignoring request {}", modelId, modelWriteId); + warnIsCalculatingRisks(modelId, modelWriteId); return new ResponseEntity<>(HttpStatus.ACCEPTED); } @@ -742,10 +774,8 @@ public ResponseEntity calculateRisks(@PathVariable String modelWriteId, RiskCalculationMode.values()); } - Progress validationProgress = modelObjectsHelper.getValidationProgressOfModel(model); - validationProgress.updateProgress(0d, "Risk calculation starting"); - - logger.debug("Marking as calculating risks [{}] {}", modelId, model.getName()); + Progress validationProgress = modelObjectsHelper.getTaskProgressOfModel(RISK_CALCULATION, model); + validationProgress.updateProgress(0d, RISK_CALCULATION + " " + STARTING); model.markAsCalculatingRisks(rcMode, true); ScheduledFuture future = Executors.newScheduledThreadPool(1).schedule(() -> { @@ -768,7 +798,7 @@ public ResponseEntity calculateRisks(@PathVariable String modelWriteId, return true; }, 0, TimeUnit.SECONDS); - modelObjectsHelper.registerValidationExecution(modelId, future); + modelObjectsHelper.registerTaskExecution(modelId, future); return new ResponseEntity<>(HttpStatus.ACCEPTED); } @@ -818,19 +848,17 @@ public ResponseEntity calculateRisksBlocking(@PathVariable St String modelId = model.getId(); if (model.isValidating()) { - logger.warn("Model {} is currently validating - ignoring calc risks request {}", modelId, modelWriteId); - return ResponseEntity.status(HttpStatus.OK).body(new RiskCalcResultsDB()); //TODO: may need to improve this + warnIsValidating(modelId, modelWriteId); + return ResponseEntity.status(HttpStatus.OK).body(new RiskCalcResultsDB()); } if (model.isCalculatingRisks()) { - logger.warn("Model {} is already calculating risks - ignoring request {}", modelId, modelWriteId); - return ResponseEntity.status(HttpStatus.OK).body(new RiskCalcResultsDB()); //TODO: may need to improve this + warnIsCalculatingRisks(modelId, 
modelWriteId); + return ResponseEntity.status(HttpStatus.OK).body(new RiskCalcResultsDB()); } - validationProgress = modelObjectsHelper.getValidationProgressOfModel(model); - validationProgress.updateProgress(0d, "Risk calculation starting"); - - logger.debug("Marking as calculating risks [{}] {}", modelId, model.getName()); + validationProgress = modelObjectsHelper.getTaskProgressOfModel(RISK_CALCULATION, model); + validationProgress.updateProgress(0d, RISK_CALCULATION + " " + STARTING); model.markAsCalculatingRisks(rcMode, save); } //synchronized block @@ -1241,7 +1269,26 @@ public ResponseEntity getRiskCalcProgress(@PathVariable String modelId synchronized(this) { final Model model = secureUrlHelper.getModelFromUrlThrowingException(modelId, WebKeyRole.READ); - return ResponseEntity.status(HttpStatus.OK).body(modelObjectsHelper.getValidationProgressOfModel(model)); + return ResponseEntity.status(HttpStatus.OK).body(modelObjectsHelper.getTaskProgressOfModel(RISK_CALCULATION, model)); + } + } + + /** + * Get an update on the progress of the recommendations operation, given the ID of the model. + * + * @param modelId + * @return recommendations progress + * @throws java.rmi.UnexpectedException + */ + @GetMapping(value = "/models/{modelId}/recommendationsprogress") + public ResponseEntity getRecommendationsProgress(@PathVariable String modelId) throws UnexpectedException { + logger.info("Called REST method to GET recommendations progress for model {}", modelId); + + synchronized(this) { + final Model model = secureUrlHelper.getModelFromUrlThrowingException(modelId, WebKeyRole.READ); + Progress progress = modelObjectsHelper.getTaskProgressOfModel(RECOMMENDATIONS, model); + logger.info("{}", progress); + return ResponseEntity.status(HttpStatus.OK).body(progress); } } @@ -1332,10 +1379,7 @@ public ResponseEntity calculateThreatGraph( try { RiskCalculationMode.valueOf(riskMode); } catch (IllegalArgumentException e) { - logger.error("Found unexpected riskCalculationMode parameter value {}, valid values are: {}.", - riskMode, RiskCalculationMode.values()); - throw new BadRequestErrorException("Invalid 'riskMode' parameter value " + riskMode + - ", valid values are: " + Arrays.toString(RiskCalculationMode.values())); + throw new BadRiskModeException(riskMode); } final Model model = secureUrlHelper.getModelFromUrlThrowingException(modelId, WebKeyRole.READ); @@ -1343,8 +1387,6 @@ public ResponseEntity calculateThreatGraph( AStoreWrapper store = storeModelManager.getStore(); try { - logger.info("Initialising JenaQuerierDB"); - JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), model.getModelStack(), false); @@ -1360,8 +1402,7 @@ public ResponseEntity calculateThreatGraph( } if (!apa.checkRiskCalculationMode(riskMode)) { - logger.error("mismatch in risk calculation mode found"); - throw new BadRequestErrorException("mismatch between the stored and requested risk calculation mode, please run the risk calculation"); + throw new RiskModeMismatchException(); } TreeJsonDoc treeDoc = apa.calculateAttackTreeDoc(targetURIs, riskMode, allPaths, normalOperations); @@ -1372,7 +1413,6 @@ public ResponseEntity calculateThreatGraph( logger.error("Threat graph calculation failed due to invalid misbehaviour set", e); throw e; } catch (BadRequestErrorException e) { - logger.error("mismatch between the stored and requested risk calculation mode, please run the risk calculation"); throw e; } catch (Exception e) { logger.error("Threat path failed due to an error", e); @@ -1381,4 
+1421,174 @@ public ResponseEntity calculateThreatGraph( } } + /* + * This REST method generates a recommendation report, as an asynchronous call. + * Results may be downloaded once this task has completed. + * + * @param modelId the String representation of the model object to search + * @param riskMode optional string indicating the preferred risk calculation mode (defaults to CURRENT) + * @param localSearch optional flag indicating whether to use local search (defaults to true) + * @param acceptableRiskLevel string indicating the acceptable risk level using domain model URI + * @param targetURIs optional list of target misbehaviour sets + * @return ACCEPTED status and jobId for the background task + * @throws InternalServerErrorException if an error occurs during report generation + */ + @GetMapping(value = "/models/{modelId}/recommendations") + public ResponseEntity calculateRecommendations( + @PathVariable String modelId, + @RequestParam(defaultValue = "CURRENT") String riskMode, + @RequestParam(defaultValue = "true") boolean localSearch, + @RequestParam String acceptableRiskLevel, + @RequestParam (required = false) List targetURIs) { + + // Check if targetURIs is null or empty and assign an empty list if it is + if (targetURIs == null) { + targetURIs = new ArrayList<>(); + } + + final List finalTargetURIs = targetURIs; + + logger.info("Calculating recommendations for model {}", modelId); + riskMode = riskMode.replaceAll("[\n\r]", "_"); + logger.info(" riskMode: {}",riskMode); + + RiskCalculationMode rcMode; + + try { + rcMode = RiskCalculationMode.valueOf(riskMode); + } catch (IllegalArgumentException e) { + throw new BadRiskModeException(riskMode); + } + + final String rm = riskMode; + + final Model model; + Progress progress; + + synchronized(this) { + model = secureUrlHelper.getModelFromUrlThrowingException(modelId, WebKeyRole.READ); + String mId = model.getId(); + + if (model.isValidating()) { + warnIsValidating(mId, modelId); + return new ResponseEntity<>(HttpStatus.ACCEPTED); + } + + if (model.isCalculatingRisks()) { + warnIsCalculatingRisks(mId, modelId); + return new ResponseEntity<>(HttpStatus.ACCEPTED); + } + + progress = modelObjectsHelper.getTaskProgressOfModel(RECOMMENDATIONS, model); + progress.updateProgress(0d, RECOMMENDATIONS + " " + STARTING); + model.markAsCalculatingRisks(rcMode, false); + } //synchronized block + + AStoreWrapper store = storeModelManager.getStore(); + + logger.info("Creating async job for {}", modelId); + String jobId = UUID.randomUUID().toString(); + logger.info("Submitting async job with id: {}", jobId); + + ScheduledFuture future = Executors.newScheduledThreadPool(1).schedule(() -> { + boolean success = false; + + try { + JenaQuerierDB querierDB = new JenaQuerierDB(((JenaTDBStoreWrapper) store).getDataset(), + model.getModelStack(), true); + + querierDB.initForRiskCalculation(); + + logger.info("Calculating recommendations"); + + AttackPathDataset apd = new AttackPathDataset(querierDB); + + // validate targetURIs (if set) + if (!apd.checkMisbehaviourList(finalTargetURIs)) { + logger.error("Invalid target URIs set"); + throw new MisbehaviourSetInvalidException("Invalid misbehaviour set"); + } + + // validate acceptable risk level + if (!apd.checkRiskLevelKey(acceptableRiskLevel)) { + logger.error("Invalid acceptableRiskLevel: {}", acceptableRiskLevel); + throw new MisbehaviourSetInvalidException("Invalid acceptableRiskLevel value"); + } + + RecommendationsAlgorithmConfig recaConfig = new RecommendationsAlgorithmConfig(querierDB, model.getId(),
rm, localSearch, acceptableRiskLevel, finalTargetURIs); + recommendationsService.startRecommendationTask(jobId, recaConfig, progress); + + success = true; + } catch (BadRequestErrorException e) { + throw e; + } catch (Exception e) { + logger.error("Recommendations failed due to an error", e); + throw new InternalServerErrorException( + "Finding recommendations failed. Please contact support for further assistance."); + } finally { + //always reset the flags even if the risk calculation crashes + model.finishedCalculatingRisks(success, rcMode, false); + progress.updateProgress(1.0, "Recommendations complete"); + } + return true; + }, 0, TimeUnit.SECONDS); + + modelObjectsHelper.registerTaskExecution(model.getId(), future); + + // Build the Location URI for the job status + URI locationUri = URI.create("/models/" + modelId + "/recommendations/status/" + jobId); + + // Return 202 Accepted with a Location header + HttpHeaders headers = new HttpHeaders(); + headers.setLocation(locationUri); + + JobResponseDTO response = new JobResponseDTO(jobId, "CREATED"); + + return ResponseEntity.accepted().headers(headers).body(response); + } + + @PostMapping("/models/{modelId}/recommendations/{jobId}/cancel") + public ResponseEntity cancelRecJob( + @PathVariable String modelId, @PathVariable String jobId) { + + logger.info("Got request to cancel recommendation task for model: {}, jobId: {}", modelId, jobId); + + synchronized(this) { + final Model model = secureUrlHelper.getModelFromUrlThrowingException(modelId, WebKeyRole.WRITE); + Progress progress = modelObjectsHelper.getTaskProgressOfModel(RECOMMENDATIONS, model); + progress.setMessage("Cancelling"); + recommendationsService.updateRecommendationJobState(jobId, RecommendationJobState.ABORTED, "job cancelled"); + } + + return new ResponseEntity<>(HttpStatus.OK); + } + + @GetMapping("/models/{modelId}/recommendations/{jobId}/status") + public ResponseEntity checkRecJobStatus( + @PathVariable String modelId, @PathVariable String jobId) { + + logger.info("Got request for jobId {} status", jobId); + + Optional optionalState = recommendationsService.getRecommendationJobState(jobId); + String stateAsString = optionalState.map(state -> state.toString()).orElse("UNKNOWN"); + + Optional optionalMessage = recommendationsService.getRecommendationJobMessage(jobId); + String message = optionalMessage.map(msg -> msg.toString()).orElse(""); + + JobResponseDTO response = new JobResponseDTO(jobId, stateAsString, message); + + return ResponseEntity.ok().body(response); + } + + @GetMapping("/models/{modelId}/recommendations/{jobId}/result") + public ResponseEntity downloadRecommendationsReport( + @PathVariable String modelId, @PathVariable String jobId) { + + logger.debug("Got download request for jobId: {}", jobId); + + return recommendationsService.getRecReport(jobId) + .map(ResponseEntity::ok) + .orElse(ResponseEntity.notFound().build()); + } + } diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/JobResponseDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/JobResponseDTO.java new file mode 100644 index 00000000..c9072b7f --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/JobResponseDTO.java @@ -0,0 +1,58 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise 
Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-12-23 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto; + +public class JobResponseDTO { + + private String jobId; + private String state; + private String message; + + public JobResponseDTO(String jobId, String stateName) { + this.jobId = jobId; + this.state = stateName; + this.message = ""; + } + + public JobResponseDTO(String jobId, String stateName, String msg) { + this.jobId = jobId; + this.state = stateName; + this.message = msg; + } + + public String getJobId() { return this.jobId; } + + public void setJobId(String jobid) { this.jobId = jobid; } + + public String getMessage() { return this.message; } + + public void setMessage(String msg) { this.message = msg; } + + public String getState() { return this.state; } + + public void setState(String stateName) { this.state = stateName; } + + +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/AdditionalPropertyDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/AdditionalPropertyDTO.java new file mode 100644 index 00000000..6c34861b --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/AdditionalPropertyDTO.java @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. 
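Taken together, the recommendations endpoints added to ModelController above and the JobResponseDTO shown here form an asynchronous job flow: submit the job, poll its status, then download the report. A minimal client-side sketch of that flow, assuming the server is reachable at http://localhost:8081/system-modeller, that the 202 response carries the JobResponseDTO body, and that the model webkey and acceptableRiskLevel values are placeholders (none of this code is part of the changeset):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RecommendationsClientSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:8081/system-modeller";   // assumed deployment URL
        String model = "example-model-webkey";                    // hypothetical model webkey

        // 1. Submit the job: the server replies 202 Accepted with a Location header and a JobResponseDTO body.
        HttpRequest submit = HttpRequest.newBuilder(URI.create(base + "/models/" + model
                + "/recommendations?acceptableRiskLevel=domain%23RiskLevelMedium")) // hypothetical risk-level URI
                .GET().build();
        String acceptedBody = client.send(submit, HttpResponse.BodyHandlers.ofString()).body();
        String jobId = acceptedBody.replaceAll(".*\"jobId\"\\s*:\\s*\"([^\"]+)\".*", "$1"); // crude JSON field pick

        // 2. Poll the status endpoint until the reported state is terminal.
        String state = "CREATED";
        while (!state.matches("FINISHED|FAILED|ABORTED|TIMED_OUT")) {
            Thread.sleep(5000);
            HttpRequest status = HttpRequest.newBuilder(URI.create(base + "/models/" + model
                    + "/recommendations/" + jobId + "/status")).GET().build();
            String statusBody = client.send(status, HttpResponse.BodyHandlers.ofString()).body();
            state = statusBody.replaceAll(".*\"state\"\\s*:\\s*\"([^\"]+)\".*", "$1");
        }

        // 3. Download the recommendation report once the job has finished.
        HttpRequest result = HttpRequest.newBuilder(URI.create(base + "/models/" + model
                + "/recommendations/" + jobId + "/result")).GET().build();
        System.out.println(client.send(result, HttpResponse.BodyHandlers.ofString()).body());
    }
}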
+// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import lombok.Data; + +@Data +public class AdditionalPropertyDTO { + private String key = ""; + private String value = ""; +} + diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/AssetDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/AssetDTO.java new file mode 100644 index 00000000..db4d2a40 --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/AssetDTO.java @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import java.util.List; + +import lombok.Data; + +@Data +public class AssetDTO { + private String label; + private String type; + private String uri; + private String identifier; + private List additionalProperties; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ConsequenceDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ConsequenceDTO.java new file mode 100644 index 00000000..7541ed9c --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ConsequenceDTO.java @@ -0,0 +1,38 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. 
+// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import lombok.Data; + +@Data +public class ConsequenceDTO { + private AssetDTO asset; + private String label; + private String description; + private String impact; + private String likelihood; + private String risk; + private String uri; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ControlDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ControlDTO.java new file mode 100644 index 00000000..c2be5ed9 --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ControlDTO.java @@ -0,0 +1,36 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import lombok.Data; + +@Data +public class ControlDTO { + private String label; + private String description; + private String uri; + private AssetDTO asset; + private String action; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ControlStrategyDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ControlStrategyDTO.java new file mode 100644 index 00000000..ad8bdfb7 --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/ControlStrategyDTO.java @@ -0,0 +1,34 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. 
+// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import lombok.Data; + +@Data +public class ControlStrategyDTO { + private String uri; + private String description; + private String category; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/RecommendationDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/RecommendationDTO.java new file mode 100644 index 00000000..9e5690bc --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/RecommendationDTO.java @@ -0,0 +1,37 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import java.util.Set; + +import lombok.Data; + +@Data +public class RecommendationDTO { + private int identifier; + private Set controlStrategies; + private Set controls; + private StateDTO state; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/RecommendationReportDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/RecommendationReportDTO.java new file mode 100644 index 00000000..cbba6fef --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/RecommendationReportDTO.java @@ -0,0 +1,35 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. 
+// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import java.util.List; + +import lombok.Data; + +@Data +public class RecommendationReportDTO { + private StateDTO current; + private List recommendations; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/StateDTO.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/StateDTO.java new file mode 100644 index 00000000..68c3faff --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/dto/recommendations/StateDTO.java @@ -0,0 +1,36 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2023 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By: Panos Melas +// Created Date: 2023-11-14 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations; + +import java.util.List; +import java.util.Map; + +import lombok.Data; + +@Data +public class StateDTO { + private Map risk; + private List consequences; +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/exceptions/BadRiskModeException.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/exceptions/BadRiskModeException.java new file mode 100644 index 00000000..b123d1aa --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/exceptions/BadRiskModeException.java @@ -0,0 +1,45 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2024 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. 
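The recommendation DTOs above nest as report -> recommendations -> control strategies / controls -> assets, with StateDTO carrying the risk map and consequences for a system state. A small sketch of how a report could be assembled from these classes; every field value is hypothetical and the sketch is not part of the changeset (Lombok's @Data supplies the getters, setters and toString used here):

import java.util.List;
import java.util.Set;

import uk.ac.soton.itinnovation.security.systemmodeller.rest.dto.recommendations.*;

public class RecommendationReportSketch {
    public static void main(String[] args) {
        AssetDTO asset = new AssetDTO();
        asset.setLabel("Web Server");            // hypothetical asset
        asset.setUri("system#Host_4f2c");        // hypothetical system URI

        ControlDTO control = new ControlDTO();
        control.setLabel("SoftwarePatching");
        control.setDescription("Apply vendor patches to the host");
        control.setAsset(asset);

        ControlStrategyDTO csg = new ControlStrategyDTO();
        csg.setUri("system#CSG_Patching_4f2c");  // hypothetical control strategy URI
        csg.setCategory("Mitigate");

        RecommendationDTO recommendation = new RecommendationDTO();
        recommendation.setIdentifier(1);
        recommendation.setControls(Set.of(control));
        recommendation.setControlStrategies(Set.of(csg));

        RecommendationReportDTO report = new RecommendationReportDTO();
        report.setRecommendations(List.of(recommendation));   // the "current" StateDTO would be filled in the same way
        System.out.println(report);
    }
}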
+// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By : Ken Meacham +// Created Date : 05/02/2024 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions; + +import java.util.Arrays; + +import uk.ac.soton.itinnovation.security.model.system.RiskCalculationMode; + +/** + * BAD_REQUEST error indicating invalid riskMode + * Will present as an HTTP response. + */ + +public class BadRiskModeException extends BadRequestErrorException { + public BadRiskModeException(String riskMode) { + super(createMessage(riskMode)); + } + + private static String createMessage(String riskMode) { + return(String.format("Invalid 'riskMode' parameter value: %s. Valid values are: %s", + riskMode, Arrays.toString(RiskCalculationMode.values()))); + } +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/exceptions/RiskModeMismatchException.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/exceptions/RiskModeMismatchException.java new file mode 100644 index 00000000..98a760ae --- /dev/null +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/rest/exceptions/RiskModeMismatchException.java @@ -0,0 +1,36 @@ +///////////////////////////////////////////////////////////////////////// +// +// © University of Southampton IT Innovation Centre, 2024 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By : Ken Meacham +// Created Date : 05/02/2024 +// Created for Project : Cyberkit4SME +// +///////////////////////////////////////////////////////////////////////// +package uk.ac.soton.itinnovation.security.systemmodeller.rest.exceptions; + +/** + * BAD_REQUEST error indicating mismatch between stored and requested risk calculation modes + * Will present as an HTTP response. 
+ */ + +public class RiskModeMismatchException extends BadRequestErrorException { + public RiskModeMismatchException() { + super("Mismatch between the stored and requested risk calculation mode, please run the risk calculation"); + } +} diff --git a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/semantics/ModelObjectsHelper.java b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/semantics/ModelObjectsHelper.java index fb4bdccd..2234c147 100644 --- a/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/semantics/ModelObjectsHelper.java +++ b/src/main/java/uk/ac/soton/itinnovation/security/systemmodeller/semantics/ModelObjectsHelper.java @@ -102,8 +102,8 @@ public class ModelObjectsHelper { private Map> modelAssetUris; private Map> modelThreats; - private Map modelValidationProgress; - private Map> validationFutures; + private Map taskProgress; + private Map> taskFutures; private HashMap> loadingFutures; private Map modelLoadingProgress; @@ -115,6 +115,11 @@ public class ModelObjectsHelper { private List defaultUserDomainModels; //TODO: persist in TDB instead + private static final String FAILED = "failed"; + private static final String COMPLETED = "completed"; + private static final String CANCELLED = "cancelled"; + + /** * Initialises this component. */ @@ -124,8 +129,8 @@ public void init() throws IOException { modelAssetIDs = new HashMap<>(); modelAssetUris = new HashMap<>(); modelThreats = new HashMap<>(); - modelValidationProgress = new HashMap<>(); - validationFutures = new HashMap<>(); + taskProgress = new HashMap<>(); + taskFutures = new HashMap<>(); loadingFutures = new HashMap<>(); modelLoadingProgress = new HashMap<>(); modelLocks = new HashMap<>(); @@ -325,22 +330,22 @@ public void deleteAssetFromCache(Asset asset, Model model) { assetIDs.remove(uri); assetUris.remove(id); } - - public boolean registerValidationExecution(String modelId, ScheduledFuture future) { - if (validationFutures.containsKey(modelId)){ - ScheduledFuture validationExecution = validationFutures.get(modelId); - if (validationExecution.isDone()) { - logger.debug("Clearing previous validation execution"); + public boolean registerTaskExecution(String modelId, ScheduledFuture future) { + + if (taskFutures.containsKey(modelId)){ + ScheduledFuture taskExecution = taskFutures.get(modelId); + if (taskExecution.isDone()) { + logger.debug("Clearing previous task execution"); //TODO: tidy up previous execution } else { - logger.warn("Validation execution already registered (still running)"); + logger.warn("Task execution already registered (still running)"); return false; } } - logger.debug("Registering validation execution for model: {}", modelId); - validationFutures.put(modelId, future); + logger.debug("Registering task execution for model: {}", modelId); + taskFutures.put(modelId, future); return true; } @@ -352,7 +357,7 @@ public boolean registerLoadingExecution(String modelId, ScheduledFuture futur logger.debug("Clearing previous loading execution"); //TODO: tidy up previous execution } else { - logger.warn("Validation execution already registered (still running)"); + logger.warn("Loading execution already registered (still running)"); return false; } } @@ -362,61 +367,76 @@ public boolean registerLoadingExecution(String modelId, ScheduledFuture futur return true; } - public Progress getValidationProgressOfModel(Model model){ + //Default method retuns validation progress + public Progress getValidationProgressOfModel(Model model) { + return 
getTaskProgressOfModel("Validation", model); + } + + public Progress getTaskProgressOfModel(String name, Model model) { String modelId = model.getId(); - Progress validationProgress; - if (modelValidationProgress.containsKey(modelId)){ - validationProgress = modelValidationProgress.get(modelId); - } else { - validationProgress = new Progress(modelId); - modelValidationProgress.put(modelId, validationProgress); - } - //logger.info("Validation progress status: {}", validationProgress.getStatus()); - + Progress progress = getOrCreateTaskProgress(modelId); + // No need to check execution if not yet running - if (! "running".equals(validationProgress.getStatus())) { - logger.info("Validation not running - not checking execution status"); - return validationProgress; + if (! "running".equals(progress.getStatus())) { + logger.info("{} not running - not checking execution status", name); + return progress; } - if (validationFutures.containsKey(modelId)) { - ScheduledFuture validationExecution = validationFutures.get(modelId); - if (validationExecution.isDone()) { - Object result; - - try { - result = validationExecution.get(); - logger.debug("Validation result: {}", result != null ? result.toString() : "null"); - if ( (result == null) || (result.equals(false)) ) { - validationProgress.updateProgress(1.0, "Validation failed", "failed", "Unknown error"); - } - else { - validationProgress.updateProgress(1.0, "Validation complete", "completed"); - } - } catch (InterruptedException ex) { - logger.error("Could not get validation progress", ex); - validationProgress.updateProgress(1.0, "Validation cancelled", "cancelled"); - } catch (ExecutionException ex) { - logger.error("Could not get validation progress", ex); - validationProgress.updateProgress(1.0, "Validation failed", "failed", ex.getMessage()); - } - - // Finally, remove the execution from the list - logger.debug("Unregistering validation execution for model: {}", modelId); - validationFutures.remove(modelId); - //KEM - don't remove the progress object here, as others requests still need access to this - //(e.g. another user may monitor validation progress) - //modelValidationProgress.remove(modelId); - } + if (taskFutures.containsKey(modelId)) { + ScheduledFuture taskExecution = taskFutures.get(modelId); + updateProgressWithTaskResult(taskExecution, name, modelId, progress); } else { - logger.warn("No registered execution for model validation: {}", modelId); + String lowerName = name.toLowerCase(); + logger.warn("No registered execution for model {}: {}", lowerName, modelId); + } + + return progress; + } + + private Progress getOrCreateTaskProgress(String modelId) { + Progress progress; + + if (taskProgress.containsKey(modelId)){ + progress = taskProgress.get(modelId); + } else { + progress = new Progress(modelId); + taskProgress.put(modelId, progress); } - //logger.info("Validation progress: {}", validationProgress); - return validationProgress; + return progress; } + + private void updateProgressWithTaskResult(ScheduledFuture taskExecution, String name, String modelId, Progress progress) { + if (taskExecution.isDone()) { + Object result; + + try { + result = taskExecution.get(); + logger.debug("{} result: {}", name, result != null ? 
result.toString() : "null"); + if ( (result == null) || (result.equals(false)) ) { + progress.updateProgress(1.0, name + " failed", FAILED, "Unknown error"); + } + else { + progress.updateProgress(1.0, name + " complete", COMPLETED); + } + } catch (InterruptedException ex) { + logger.error("Could not get task progress", ex); + progress.updateProgress(1.0, name + " cancelled", CANCELLED); + } catch (ExecutionException ex) { + logger.error("Could not get task progress", ex); + progress.updateProgress(1.0, name + " failed", FAILED, ex.getMessage()); + } + + // Finally, remove the execution from the list + logger.info("Unregistering task execution for model: {}", modelId); + taskFutures.remove(modelId); + //KEM - don't remove the progress object here, as others requests still need access to this + //(e.g. another user may monitor validation progress) + //modelValidationProgress.remove(modelId); + } +} public LoadingProgress createLoadingProgressOfModel(Model model, String loadingProgressID){ @@ -454,24 +474,24 @@ public LoadingProgress getLoadingProgressOfModel(String loadingProgressID){ result = loadingExecution.get(); logger.debug("Loading result: {}", result != null ? result.toString() : "null"); if ( (result == null) || (! (result instanceof Model)) ) { - loadingProgress.updateProgress(1.0, "Loading failed", "failed", "Unknown error", null); + loadingProgress.updateProgress(1.0, "Loading failed", FAILED, "Unknown error", null); } else { - loadingProgress.updateProgress(1.0, "Loading complete", "completed", "", (Model)result); + loadingProgress.updateProgress(1.0, "Loading complete", COMPLETED, "", (Model)result); } } catch (InterruptedException ex) { logger.error("Could not get loading progress", ex); - loadingProgress.updateProgress(1.0, "Loading cancelled", "cancelled"); + loadingProgress.updateProgress(1.0, "Loading cancelled", CANCELLED); } catch (ExecutionException ex) { Throwable cause = ex.getCause(); if (cause instanceof ModelException) { ModelException me = (ModelException)cause; logger.error("Model exception", me); - loadingProgress.updateProgress(1.0, "Model error", "failed", me.getMessage(), me.getModel()); + loadingProgress.updateProgress(1.0, "Model error", FAILED, me.getMessage(), me.getModel()); } else { logger.error("Could not get loading progress", ex); - loadingProgress.updateProgress(1.0, "Loading failed", "failed", ex.getMessage(), null); + loadingProgress.updateProgress(1.0, "Loading failed", FAILED, ex.getMessage(), null); } } @@ -1380,6 +1400,7 @@ public boolean setUsersForDomainModel(String domainTitle, List users) { * @return true/false if they are able to access the domain model */ public boolean canUserAccessDomain(String domainURI, String user){ + /* For now, allow all users to access each domain model // If this is the user's first login we need to add them to // the management graph and set their default domain model // access. 
@@ -1387,6 +1408,9 @@ public boolean canUserAccessDomain(String domainURI, String user){ AStoreWrapper store = storeManager.getStore(); return store.queryAsk(TemplateLoader.formatTemplate(queries.get("CanUserAccessDomain"), storeManager.getManagementGraph(), user, domainURI)); + */ + + return true; } /** diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index f092c31b..7d8e84d7 100755 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -101,3 +101,6 @@ admin-role=admin # Make SpringDoc format the JSON nicely at /system-modeller/v3/api-docs springdoc.writer-with-default-pretty-printer=true + +# Timeout (in secs) for recommendations calculation +recommendations.timeout.secs=900 diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml index 6d4e159e..7c74b108 100644 --- a/src/main/resources/logback.xml +++ b/src/main/resources/logback.xml @@ -33,11 +33,14 @@ - + - - - + + + + + + diff --git a/src/main/webapp/app/common/constants.js b/src/main/webapp/app/common/constants.js index ab71632c..19934dd7 100644 --- a/src/main/webapp/app/common/constants.js +++ b/src/main/webapp/app/common/constants.js @@ -3,8 +3,11 @@ export const CTRL_TOOLTIP_DELAY = 1000; export const MAX_ASSET_NAME_LENGTH = 50; export const ASSET_MIN_CARDINALITY = 1; //default min cardinality for new asset export const ASSET_MAX_CARDINALITY = 1; //default max cardinality for new asset +export const URI_PREFIX = "http://it-innovation.soton.ac.uk/ontologies/trustworthiness/" export const ASSET_DEFAULT_POPULATION = "http://it-innovation.soton.ac.uk/ontologies/trustworthiness/domain#PopLevelSingleton" export const MODELLING_ERRORS_URI = "http://it-innovation.soton.ac.uk/ontologies/trustworthiness/domain#Anomalies"; +export const ACCEPTABLE_RISK_LEVEL = "domain#RiskLevelMedium"; +export const MAX_RECOMMENDATIONS = 10; //max number of recommendations to display export const MODEL_NAME_LIMIT = 50; export const MODEL_DESCRIPTION_LIMIT = 500; export const DISABLE_GROUPING = false; //disable add group feature on Canvas diff --git a/src/main/webapp/app/dashboard/components/modelItem/ModelItem.js b/src/main/webapp/app/dashboard/components/modelItem/ModelItem.js index cc61dd9d..75c2b2d7 100644 --- a/src/main/webapp/app/dashboard/components/modelItem/ModelItem.js +++ b/src/main/webapp/app/dashboard/components/modelItem/ModelItem.js @@ -380,6 +380,9 @@ class ModelItem extends Component { } formatRiskCalcMode(mode) { + console.log("mode:", mode); + if (!mode) + return "unknown" //Capitalise first char, lower case the rest return mode.charAt(0).toUpperCase() + mode.slice(1).toLowerCase(); } diff --git a/src/main/webapp/app/dashboard/components/modelList/ModelList.js b/src/main/webapp/app/dashboard/components/modelList/ModelList.js index 1bb44a98..ecde703d 100644 --- a/src/main/webapp/app/dashboard/components/modelList/ModelList.js +++ b/src/main/webapp/app/dashboard/components/modelList/ModelList.js @@ -11,12 +11,20 @@ class ModelList extends Component { render() { let {models, filter, dispatch, ontologies, user} = this.props; + + models.map(model => { + model.name = model.name ? model.name : "null"; + return model; + }); + let domainToFilter = [] + for (let i = 0; i < filter.domainModelFilters.length; i++) { if(filter.domainModelFilters[i].checked){ domainToFilter[i] = filter.domainModelFilters[i].name.toLowerCase(); } } + return (
diff --git a/src/main/webapp/app/dashboard/components/popups/EditModelModal.js b/src/main/webapp/app/dashboard/components/popups/EditModelModal.js index 659c5ec4..eb7f90eb 100644 --- a/src/main/webapp/app/dashboard/components/popups/EditModelModal.js +++ b/src/main/webapp/app/dashboard/components/popups/EditModelModal.js @@ -17,7 +17,7 @@ class EditModelModal extends Component { componentWillMount() { this.setState({ ...this.state, - draftName: this.props.model.name, + draftName: this.props.model.name ? this.props.model.name : "null", draftDescription: this.props.model.description ? this.props.model.description : "" }) } diff --git a/src/main/webapp/app/domainManager/components/Domain.js b/src/main/webapp/app/domainManager/components/Domain.js index 920a5c58..555f7979 100644 --- a/src/main/webapp/app/domainManager/components/Domain.js +++ b/src/main/webapp/app/domainManager/components/Domain.js @@ -35,8 +35,8 @@ class Domain extends Component { - + {/* */} - - return (
- { controlSets.length > 0 ? controlSetReset : "" } + { controlSets.length > 0 ? this.renderControlSetOptions() : "" } + + + + + + + { + this.setState({ + ...this.state, + controls: {...this.state.controls, search: e.nativeEvent.target.value.trim()} + }) + }} + // need to prevent the Form being submitted when Return is pressed + onKeyPress={(e) => { e.key === 'Enter' && e.preventDefault(); }} + /> + + + + { + this.setState({ + ...this.state, + controls: { + ...this.state.controls, + filter: e.nativeEvent.target.checked + } + }) + }}> + Only asserted controls + + + + ) + } + renderControlStrategiesPanel(csgs) { let csgsRender = []; csgs.map((csgEntry, index) => { let name = csgEntry[0]; let csgList = csgEntry[1]; + let nProposedCsgs = this.countProposedControlStrategies(csgList); + let proposed = nProposedCsgs > 0; let context = {"selection": "csgType"}; let spinnerActive = false; //may not need this + // If text search filter is defined, filter out control strategies with names that don't match + if (this.state.csgs.search !== "") { + let search = this.state.csgs.search.toLowerCase(); + if (name.toLowerCase().indexOf(search) === -1) return; + } + + // If filter is active, filter out CSG groups that have no proposed CSGs + if (this.state.csgs.filter && (nProposedCsgs === 0)) return; + let csgOverlayProps = { delayShow: Constants.TOOLTIP_DELAY, placement: "left", overlay: { this.props.dispatch(openControlStrategyExplorer(csgList, context)); this.props.dispatch(bringToFrontWindow("controlStrategyExplorer")); }}> - {name} : {this.countProposedControlStrategies(csgList)} of {csgList.length} {" "} + {name} : {nProposedCsgs} of {csgList.length} {" "} {spinnerActive ? : null} @@ -549,6 +619,7 @@ class ModelSummary extends Component { return (
+ {csgs.length > 0 ? this.renderCsgOptions() : ""} + + + + { + this.setState({ + ...this.state, + csgs: {...this.state.csgs, search: e.nativeEvent.target.value.trim()} + }) + }} + // need to prevent the Form being submitted when Return is pressed + onKeyPress={(e) => { e.key === 'Enter' && e.preventDefault(); }} + /> + + + + { + this.setState({ + ...this.state, + csgs: { + ...this.state.csgs, + filter: e.nativeEvent.target.checked + } + }) + }}> + Only enabled control strategies + + + + ) + } + updateModel(id, updatedModel) { this.setState({ ...this.state, editDetailsModal: false }); this.props.dispatch(updateModel(id, updatedModel)); @@ -589,6 +700,7 @@ class ModelSummary extends Component { updateControlsState(controlLabels) { this.setState({...this.state, controls: { + ...this.state.controls, updating: controlLabels } }); diff --git a/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ControlStrategiesPanel.js b/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ControlStrategiesPanel.js index 2ef60126..0a786956 100644 --- a/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ControlStrategiesPanel.js +++ b/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ControlStrategiesPanel.js @@ -20,7 +20,11 @@ class ControlStrategiesPanel extends React.Component { {csgs.length > 0 ? csgs.map((csgEntry, index) => { let name = csgEntry[0]; let csg = csgEntry[1]; - let context = {"selection": "csg", "asset": this.props.asset}; + let asset = csg.asset ? csg.asset : this.props.asset; + if (this.props.displayAssetName) { + name += " at \"" + asset.label + "\""; + } + let context = {"selection": "csg", "asset": asset}; let csgOverlayProps = { delayShow: Constants.TOOLTIP_DELAY, placement: "left", @@ -65,6 +69,7 @@ class ControlStrategiesPanel extends React.Component { ControlStrategiesPanel.propTypes = { modelId: PropTypes.string, asset: PropTypes.object, + displayAssetName: PropTypes.bool, assetCsgs: PropTypes.array, dispatch: PropTypes.func, authz: PropTypes.object diff --git a/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ThreatsPanel.js b/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ThreatsPanel.js index 81994ab1..e416a59d 100644 --- a/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ThreatsPanel.js +++ b/src/main/webapp/app/modeller/components/panes/details/accordion/panels/ThreatsPanel.js @@ -547,7 +547,6 @@ class ThreatsPanel extends React.Component { let threatDescNameIdx = threat["description"].indexOf(": "); let threatName = threat["label"]; - //console.log(threatName); if (threatDescNameIdx !== -1) { // use name from description if found @@ -587,16 +586,14 @@ class ThreatsPanel extends React.Component { status = statusString; } - //console.log("status: ", status); - //console.log("triggeredStatus: ", triggeredStatus); - let threatColorAndBE = ((status === "BLOCKED") || (status === "MITIGATED")) ? getThreatColor(threat, this.props.model.controlStrategies ,this.props.model.levels["TrustworthinessLevel"], true) : undefined; - //console.log("threatColorAndBE: ", threatColorAndBE); //status: UNMANAGED, ACCEPTED, MITIGATED, BLOCKED let statusText = ""; + let emptyLevelTooltip; let symbol; + let threatClass = ""; /* Uncomment to add triggered state, e.g. 
for debugging if (triggeredStatus === "UNTRIGGERED") { @@ -607,6 +604,10 @@ class ThreatsPanel extends React.Component { } */ + if (triggeredStatus === "TRIGGERED") { + threatClass = "triggered"; + } + //Is threat a normal operation let normalOperation = threat.normalOperation !== undefined ? threat.normalOperation : false; @@ -614,24 +615,36 @@ class ThreatsPanel extends React.Component { // For now, display a blank icon here, as a space filler // TODO: display a better icon here, e.g. depending on a "isAdverseOp" - see issue #107 symbol = + threatClass = "normal"; } else if (status === "BLOCKED") { statusText += "Managed (" + threatColorAndBE.be.label + ")"; symbol = ; + threatClass = "blocked"; } else if (status === "MITIGATED") { statusText += "Managed (" + threatColorAndBE.be.label + ")"; symbol = ; + threatClass = "mitigated"; } else if (status === "ACCEPTED") { statusText += "Accepted"; // TODO: put these colors and style in a stylesheet symbol = } else { - statusText += "Unmanaged"; + if (triggeredStatus === "TRIGGERED") { + statusText += "Triggered"; + threatClass = "triggered"; + } + else { + statusText += "Unmanaged"; + threatClass = "unmanaged"; + } symbol = ; } if (triggeredStatus === "UNTRIGGERED") { statusText = "Untriggered side effect"; + emptyLevelTooltip = "This threat poses no risk as it has not been enabled by a control strategy"; symbol = ; + threatClass = "untriggered"; } let root_cause = threat.rootCause; @@ -649,23 +662,20 @@ class ThreatsPanel extends React.Component { resolved = (threat['acceptanceJustification'] !== null); } - //console.log(threatLabel + " resolved = " + resolved); - //is threat selected? let selected = this.props.selectedThreat && this.props.selectedThreat.id === threat.id; - //let impact = threat["impactLevel"]; let likelihood = threat["likelihood"]; let risk = threat["riskLevel"]; let distance = threat.distance; - let likelihoodRender = getRenderedLevelText(this.props.model.levels.Likelihood, likelihood); - let riskRender = getRenderedLevelText(this.props.model.levels.RiskLevel, risk); + let likelihoodRender = getRenderedLevelText(this.props.model.levels.Likelihood, likelihood, false, emptyLevelTooltip); + let riskRender = getRenderedLevelText(this.props.model.levels.RiskLevel, risk, false, emptyLevelTooltip); threatsRender.push(
{ + this.props.dispatch(getRecommendations( + this.props.model.id, + this.props.model.riskCalculationMode, + acceptableRiskLevel, + this.props.selectedMisbehaviour.misbehaviour.uri, + false)); + }; + return (
@@ -203,7 +214,7 @@ class MisbehaviourAccordion extends React.Component { - {this.props.model.riskCalculationMode ? "Calculate attack path" : "Run risk calculation first!"} + {this.props.model.riskLevelsValid ? "Calculate attack path" : "To use this, first run the risk calculation"} } > @@ -211,11 +222,34 @@ class MisbehaviourAccordion extends React.Component { className="btn btn-primary btn-xs" disabled={ attackPathThreats.length > 0 || - !this.props.model.riskCalculationMode + !this.props.model.riskLevelsValid } onClick={handleThreatGraphButtonClick} > - Calculate Attack Path + Get Attack Path + + + + {this.props.model.riskLevelsValid ? "Calculate recommendations" : "To use this, first run the risk calculation"} + + } + > + {loadingAttackPath ? : null} diff --git a/src/main/webapp/app/modeller/components/panes/recommendationsExplorer/AbortRecommendationsModal.js b/src/main/webapp/app/modeller/components/panes/recommendationsExplorer/AbortRecommendationsModal.js new file mode 100644 index 00000000..e99e38d8 --- /dev/null +++ b/src/main/webapp/app/modeller/components/panes/recommendationsExplorer/AbortRecommendationsModal.js @@ -0,0 +1,45 @@ +import PropTypes from 'prop-types'; +import React, { Component } from "react"; +import { Button, Modal } from "react-bootstrap"; + +class AbortRecommendationsModal extends Component { + + render() { + const {modelId, jobId, abortRecommendations, ...modalProps} = this.props; + + return ( + + + Abort Recommendations + + +

Abort current recommendations calculation?

+
+ + + + +
+ ); + } +} + +AbortRecommendationsModal.propTypes = { + modelId: PropTypes.string, + jobId: PropTypes.string, + abortRecommendations: PropTypes.func, + onHide: PropTypes.func, +}; + +export default AbortRecommendationsModal; diff --git a/src/main/webapp/app/modeller/components/panes/recommendationsExplorer/RecommendationsExplorer.js b/src/main/webapp/app/modeller/components/panes/recommendationsExplorer/RecommendationsExplorer.js new file mode 100644 index 00000000..241a8aa0 --- /dev/null +++ b/src/main/webapp/app/modeller/components/panes/recommendationsExplorer/RecommendationsExplorer.js @@ -0,0 +1,357 @@ +import React from "react"; +import PropTypes from 'prop-types'; +import {Panel, Button, OverlayTrigger, Tooltip} from "react-bootstrap"; +import {connect} from "react-redux"; +import {JsonView, defaultStyles} from 'react-json-view-lite'; +import 'react-json-view-lite/dist/index.css'; +import Explorer from "../common/Explorer"; +import ControlStrategiesPanel from "../details/accordion/panels/ControlStrategiesPanel"; +import * as Constants from "../../../../common/constants.js"; +import {renderControlSet} from "../csgExplorer/ControlStrategyRenderer"; +import { + updateControlOnAsset, + updateControls, +} from "../../../../modeller/actions/ModellerActions"; + +class RecommendationsExplorer extends React.Component { + + constructor(props) { + super(props); + + this.renderContent = this.renderContent.bind(this); + this.renderJson = this.renderJson.bind(this); + this.renderRecommendations = this.renderRecommendations.bind(this); + this.renderNoRecommendations = this.renderNoRecommendations.bind(this); + this.renderControlSets = this.renderControlSets.bind(this); + this.getControlSets = this.getControlSets.bind(this); + this.getRiskVector = this.getRiskVector.bind(this); + this.getHighestRiskLevel = this.getHighestRiskLevel.bind(this); + this.getAssetByUri = this.getAssetByUri.bind(this); + this.getRiskVectorString = this.getRiskVectorString.bind(this); + this.compareRiskVectors = this.compareRiskVectors.bind(this); + this.updateThreat = this.updateThreat.bind(this); + this.applyRecommendation = this.applyRecommendation.bind(this); + + this.state = { + updatingControlSets: {} + } + + } + + componentWillReceiveProps(nextProps) { + this.setState({...this.state, + updatingControlSets: {} + }); + } + + render() { + if (!this.props.show) { + return null; + } + + return ( + + ) + } + + renderContent() { + let renderRecommentations = true; + let recommendations = this.props.recommendations; + + if (renderRecommentations) { + return this.renderRecommendations(recommendations); + } + else { + return this.renderJson(recommendations); + } + } + + renderJson(recommendations) { + return ( +
+ {recommendations && } +
+ ) + } + + renderRecommendations(report) { + if (jQuery.isEmptyObject(report)) { + return null; + } + + let max_recommendations = Constants.MAX_RECOMMENDATIONS; //max number to display + let recommendations = report.recommendations || []; + let selected_recommendations = []; + + recommendations.forEach(rec => { + rec.state.riskVector = this.getRiskVector(rec.state.risk); + }); + + recommendations.sort((a, b) => this.compareRiskVectors(a.state.riskVector, b.state.riskVector)); //sort by ascending risk vector + + //Select recommendations from the top of the list, up to the max number + selected_recommendations = recommendations.slice(0, max_recommendations); + + let csgAssets = this.props.csgAssets; + + return ( +
+
+

N.B. The recommendations feature is a work in progress + and will not always give the most sensible recommendation(s). + It may also take a very long time to run, in particular for Future risk recommendations. + A list of known issues can be found on GitHub.

+ {recommendations.length > 0 &&

Returned {recommendations.length} recommendations + {recommendations.length > max_recommendations ? " (Displaying top " + max_recommendations + ")" : ""}

} +
+ {!recommendations.length ? this.renderNoRecommendations() : +
+ {selected_recommendations.map((rec, index) => { + let id = rec.identifier; + let reccsgs = rec.controlStrategies; + let riskVectorString = this.getRiskVectorString(rec.state.riskVector); + let riskLevel = this.getHighestRiskLevel(rec.state.riskVector); + let csgsByName = new Map(); + + reccsgs.forEach((reccsg) => { + let csguri = Constants.URI_PREFIX + reccsg.uri; + let csg = this.props.model.controlStrategies[csguri]; + let name = csg.label; + let assetUri = csgAssets[csguri]; + let asset = assetUri ? this.getAssetByUri(assetUri) : {label: "Unknown"} + csg.asset = asset; + csgsByName.set(name, csg); + }); + + csgsByName = new Map([...csgsByName.entries()].sort((a, b) => a[0].localeCompare(b[0]))); + let csgsArray = Array.from(csgsByName); + let applyButtonTooltipText = "Enable all recommended controls"; + + return ( + + + + Recommendation {index + 1} + + + + +

Residual risk: {riskLevel.label} ({riskVectorString})

+

Control Strategies to enable

+ +

Controls to enable

+ {this.renderControlSets(rec.controls)} +

+ + {applyButtonTooltipText}}> + + +

+
+
+
+ ); + })} +
} +
+ ) + } + + renderNoRecommendations() { + return ( +
+

There are no current recommendations for reducing the system model risk any further.

+
+ ); + } + + renderControlSets(controls) { + let controlSets = this.getControlSets(controls); + controlSets.sort((a, b) => a["label"].localeCompare(b["label"])); + + return ( +
+ {controlSets.map((control, index) => { + control.optional = false; //assume recommendation does not suggest optional controls + let asset = control["assetUri"] ? this.getAssetByUri(control["assetUri"]) : {label: "Unknown"} + let assetName = asset.label; + return renderControlSet(control, index, null, true, assetName, this.props, this.state, this); + })} +
+ ); + } + + getControlSets(controls) { + let modelControlSets = this.props.controlSets; + let controlSets = controls.map(control => { + let csuri = Constants.URI_PREFIX + control.uri; + let cs = modelControlSets[csuri]; + return cs; + }); + + return controlSets; + } + + getRiskVector(reportedRisk) { + let shortUris = Object.keys(reportedRisk); + let riskLevels = this.props.model.levels["RiskLevel"]; + + let riskLevelsMap = {} + riskLevels.forEach(level => { + let levelUri = level.uri; + riskLevelsMap[levelUri] = level; + }); + + let riskVector = shortUris.map(shorturi => { + let uri = Constants.URI_PREFIX + shorturi; + let riskLevel = riskLevelsMap[uri]; + let riskLevelCount = {level: riskLevel, count: reportedRisk[shorturi]} + return riskLevelCount; + }); + + //Finally sort the risk vector by level value + riskVector.sort((a, b) => { + if (a.level.value < b.level.value) { + return -1; + } + else if (a.level.value > b.level.value) { + return 1; + } + else { + return 0; + } + }); + + return riskVector; + } + + //e.g. "Very Low: 695, Low: 0, Medium: 1, High: 0, Very High: 0" + getRiskVectorString(riskVector) { + let strArr = riskVector.map(riskLevelCount => { + let level = riskLevelCount.level; + return [level.label, riskLevelCount.count].join(": "); + }); + + return strArr.join(", "); + } + + //Compare risk vectors (assumes arrays are pre-sorted) + compareRiskVectors(rva, rvb) { + let compare = 0; + for (let i = rva.length -1; i >= 0; i--) { + compare = rva[i].count - rvb[i].count; + if (compare !== 0) { + return compare; + } + } + return compare; + } + + //Get highest risk level from given risk vector + //i.e. which is the highest risk level that has >0 misbehaviours + //TODO: could this be moved to a utility function? + getHighestRiskLevel(riskVector) { + let overall = 0; + let hishestLevel = null; + riskVector.forEach(riskLevelCount => { + let level = riskLevelCount.level; + let count = riskLevelCount.count; + if (count > 0 && level.value >= overall) { + overall = level.value; + hishestLevel = level; + } + }); + + return hishestLevel; + } + + //TODO: this should be a utility function on the model + getAssetByUri(assetUri) { + let asset = this.props.model.assets.find((asset) => { + return (asset.uri === assetUri); + }); + return asset; + } + + updateThreat(arg) { + //this is to enable a single control in a control strategy + if (arg.hasOwnProperty("control")) { + //Here we still want to keep the currently selected asset, not change to the asset referred to in the updatedControl + this.props.dispatch(updateControlOnAsset(this.props.model.id, arg.control.assetId, arg.control)); + } + } + + applyRecommendation(recid) { + let proposed = true; + let report = this.props.recommendations; //get recommendations report + let rec = report.recommendations.find((rec) => rec["identifier"] === recid); + + if (rec) { + let controlsToUpdate = rec.controls.map(control => { + return Constants.URI_PREFIX + control.uri; + }); + + let updatingControlSets = {...this.state.updatingControlSets}; + controlsToUpdate.forEach(controlUri => { + updatingControlSets[controlUri] = true; + }); + + this.setState({ + updatingControlSets: updatingControlSets, + }); + + this.props.dispatch(updateControls(this.props.model.id, controlsToUpdate, proposed, proposed)); //set WIP flag only if proposed is true + } + else { + console.warn("Could not locate recommendation: ", recid); + } + } + +} + +function shouldExpandRecommendationsNode(level) { + return level <= 1; +} + +RecommendationsExplorer.propTypes = { + model: 
PropTypes.object, + controlSets: PropTypes.object, + csgAssets: PropTypes.object, + selectedAsset: PropTypes.object, + isActive: PropTypes.bool, // is in front of other panels + recommendations: PropTypes.object, + show: PropTypes.bool, + onHide: PropTypes.func, + loading: PropTypes.object, + dispatch: PropTypes.func, + windowOrder: PropTypes.number, + authz: PropTypes.object, +}; + +let mapStateToProps = function (state) { + return { + windowOrder: state.view["recommendationsExplorer"] + } +}; + +export default connect(mapStateToProps)(RecommendationsExplorer); diff --git a/src/main/webapp/app/modeller/components/panes/threats/ThreatEditor.js b/src/main/webapp/app/modeller/components/panes/threats/ThreatEditor.js index 5eac9a5e..6fba72a1 100644 --- a/src/main/webapp/app/modeller/components/panes/threats/ThreatEditor.js +++ b/src/main/webapp/app/modeller/components/panes/threats/ThreatEditor.js @@ -11,6 +11,7 @@ import {getRenderedLevelText} from "../../util/Levels"; import {bringToFrontWindow, closeWindow} from "../../../actions/ViewActions"; import {connect} from "react-redux"; import {openDocumentation} from "../../../../common/documentation/documentation" +import {getThreatStatus} from "../../util/ThreatUtils.js"; class ThreatEditor extends React.Component { @@ -100,9 +101,33 @@ class ThreatEditor extends React.Component { let likelihoodRender = Error; let riskRender = Error; - if(this.props.model.levels!=null){ - likelihoodRender = getRenderedLevelText(this.props.model.levels.Likelihood, likelihood); - riskRender = getRenderedLevelText(this.props.model.levels.RiskLevel, risk); + let statusString = getThreatStatus(threat, this.props.model.controlStrategies); + let status; + let triggeredStatus = ""; + + if (statusString.includes("/")) { + let arr = statusString.split("/"); + status = arr[0]; + triggeredStatus = arr[1]; + } + else { + status = statusString; + } + + let emptyLevelTooltip; //tooltip to display when likelihood or risk is not available ("N/A") + let triggerableText; //text to display for a triggerable threat + + if (triggeredStatus === "UNTRIGGERED") { + triggerableText = "N.B. This threat is not currently active. It is triggered when a particular control strategy is enabled."; + emptyLevelTooltip = "This threat poses no risk as it has not been enabled by a control strategy"; + } + else if (triggeredStatus === "TRIGGERED") { + triggerableText = "N.B. This threat has been triggered by a particular control strategy which is currently enabled."; + } + + if (this.props.model.levels != null){ + likelihoodRender = getRenderedLevelText(this.props.model.levels.Likelihood, likelihood, false, emptyLevelTooltip); + riskRender = getRenderedLevelText(this.props.model.levels.RiskLevel, risk, false, emptyLevelTooltip); } let assetLabelHeading =
+ {this.props.developerMode &&

{threat.uri}

} {threat.normalOperation &&

This "threat" is expected to occur in normal operation.

}

{threat !== undefined ? threat["description"] : ""}

+ {triggerableText &&

+ {triggerableText} +

} {!isComplianceThreat &&
@@ -247,6 +276,8 @@ class ThreatEditor extends React.Component { controlSets={this.props.model["controlSets"]} modelId={this.props.model["id"]} threat={threat} + threatStatus={status} + triggeredStatus={triggeredStatus} threats={this.props.model["threats"]} twas={this.props.model["twas"]} selectedMisbehaviour={this.props.selectedMisbehaviour} diff --git a/src/main/webapp/app/modeller/components/panes/threats/accordion/ThreatAccordion.js b/src/main/webapp/app/modeller/components/panes/threats/accordion/ThreatAccordion.js index 6f1b0fb1..26853e9a 100644 --- a/src/main/webapp/app/modeller/components/panes/threats/accordion/ThreatAccordion.js +++ b/src/main/webapp/app/modeller/components/panes/threats/accordion/ThreatAccordion.js @@ -14,7 +14,6 @@ import EffectPanel from "./panels/EffectPanel"; import CausePanel from "./panels/CausePanel"; import ModelMisBehavPanel from "../../details/accordion/panels/ModelMisBehavPanel"; import ControlStrategiesPanel from "./panels/ControlStrategiesPanel"; -import {getThreatStatus} from "../../../util/ThreatUtils"; import {bringToFrontWindow} from "../../../../actions/ViewActions"; class ThreatAccordion extends React.Component { @@ -31,6 +30,10 @@ class ThreatAccordion extends React.Component { this.getEntryPoints = this.getEntryPoints.bind(this); this.renderHeader = this.renderHeader.bind(this); this.renderHeaderNumbers = this.renderHeaderNumbers.bind(this); + this.renderCsgPanels = this.renderCsgPanels.bind(this); + this.renderCsgsPanel = this.renderCsgsPanel.bind(this); + this.getNonTriggerCsgs = this.getNonTriggerCsgs.bind(this); + this.getTriggerCsgs = this.getTriggerCsgs.bind(this); this.state = { expanded: { @@ -59,81 +62,7 @@ class ThreatAccordion extends React.Component { return null; } - let isComplianceThreat = this.props.threat.isComplianceThreat; - - let statusString = getThreatStatus(this.props.threat, this.props.controlStrategies); - let status; - let triggeredStatus = "UNTRIGGERED"; - - if (statusString.includes("/")) { - let arr = statusString.split("/"); - status = arr[0]; - triggeredStatus = arr[1]; - } - else { - status = statusString; - } - - let propControlSets = {}; - this.props.controlSets.forEach(cs => propControlSets[cs.uri] = cs); - - //get map of control strategy types for this threat - var csgTypes = this.props.threat["controlStrategies"]; - - //filter out any CSGs that are of type "TRIGGER", when not in developer mode (as we only need those that BLOCK or MITEGATE the threat) - let csgsAsArray = Object.keys(csgTypes).map(csgUri => { - let csgType = csgTypes[csgUri]; - if ((csgType === "TRIGGER") && !this.props.developerMode) - return undefined; - else { - let csg = this.props.controlStrategies[csgUri]; - return csg; - } - }).filter(csg => csg !== undefined); - - // We don't want to display or count CSGs where one or more CSs cannot be asserted - csgsAsArray = csgsAsArray.filter(csg => { - let controlSetUris = csg.mandatoryControlSets.concat(csg.optionalControlSets); - let controlSets = controlSetUris.map(csUri => { - let cs = propControlSets[csUri]; - return cs; - }); - let strategyNotAssertable = controlSets.map(cs => !(cs.assertable)).reduce( - (previousValue, currentValue) => previousValue || currentValue, - false - ) - return !strategyNotAssertable; - }) - - // TODO: the correctly filtered list of CSGs that we now have is only used here for the count in the panel - // We should be passing the filtered list into the ControlStrategiesPanel for display. As it is, similar filtering code is also implemented in there. 
- // The necessary Object can be created using Object.fromEntries(csgsAsArray) - - //filter only those that are resolved (enabled) - let csgResolved = csgsAsArray.filter(csg => csg.enabled); - - let nCsgs = csgsAsArray.length; //total number of CGSs - let nCsgResolved = csgResolved.length; //number of CGSs resolved - - var csgStyle = "danger"; //no CGSs are active - - if ((status === "BLOCKED") || (status === "MITIGATED")) { - if (nCsgResolved === nCsgs) { - csgStyle = "success"; //all CGSs are active - } - else { - csgStyle = "warning"; //at least one CGS is active - } - } - else if (status === "ACCEPTED") { - csgStyle = "warning"; - } - - // 7/2/2019: decided that colouring would simply be green here, if one or more CSGs are enabled (see #628) - //let csgsPanelColor = getThreatColor(this.props.threat, this.props.levels["TrustworthinessLevel"]); - let csgsPanelColor = undefined; //this just means the style is not used, so we use csgStyle instead below - - let {expanded} = this.state; + let isComplianceThreat = this.props.threat.isComplianceThreat; //N.B. The causes, effects, secondaryEffects are set in populateThreatMisbehaviours() let causes = this.props.threat.secondaryEffectConditions; @@ -144,11 +73,6 @@ class ThreatAccordion extends React.Component { // get full TWAS objects array for this threat (not for compliance threats) let entryPoints = !isComplianceThreat ? this.getEntryPoints(this.props.threat) : []; - //Previously we did not display CSGs for modelling errors, however there are known - //scenarios where there is a CSG available - //let showCSGs = !this.props.threat.isModellingError; - let showCSGs = true; - //TODO: move the style to a stylesheet let directCauseIcon = ; let directEffectsIcon = ; @@ -244,30 +168,7 @@ class ThreatAccordion extends React.Component { } - {showCSGs && - - - {this.renderHeaderNumbers("Control Strategies", null, "Control strategies that will address this threat", "threat-ctrl-strat", nCsgResolved, nCsgs)} - - - - - - - - } + {this.renderCsgPanels()}
); @@ -310,6 +211,140 @@ class ThreatAccordion extends React.Component { ); } + renderCsgPanels() { + //Create map of csuri -> cs + let propControlSets = {}; + this.props.controlSets.forEach(cs => propControlSets[cs.uri] = cs); + + return ( + [this.renderCsgsPanel(propControlSets, this.getNonTriggerCsgs(), false), + this.renderCsgsPanel(propControlSets, this.getTriggerCsgs(), true)] + ); + } + + // Get standard CSGs (non-triggering) + getNonTriggerCsgs() { + //get map of control strategy types for this threat + let csgTypes = this.props.threat["controlStrategies"]; + + //filter out any CSGs that are of type "TRIGGER" + let csgsAsArray = Object.keys(csgTypes).map(csgUri => { + let csgType = csgTypes[csgUri]; + if (csgType === "TRIGGER") + return undefined; + else { + let csg = this.props.controlStrategies[csgUri]; + return csg; + } + }).filter(csg => csg !== undefined); + + return csgsAsArray; + } + + // Get CSGs that are triggering + getTriggerCsgs() { + //get map of control strategy types for this threat + let csgTypes = this.props.threat["controlStrategies"]; + + //filter out any CSGs that are NOT of type "TRIGGER" + let csgsAsArray = Object.keys(csgTypes).map(csgUri => { + let csgType = csgTypes[csgUri]; + if (csgType !== "TRIGGER") + return undefined; + else { + let csg = this.props.controlStrategies[csgUri]; + return csg; + } + }).filter(csg => csg !== undefined); + + return csgsAsArray; + } + + renderCsgsPanel(propControlSets, csgsAsArray, triggering) { + let panelTitle = triggering ? "Triggering Control Strategies" : "Control Strategies"; + let panelTooltip = triggering ? "Control strategies that will trigger this threat" : + "Control strategies that will address this threat"; + + // We don't want to display or count CSGs where one or more CSs cannot be asserted + csgsAsArray = csgsAsArray.filter(csg => { + let controlSetUris = csg.mandatoryControlSets.concat(csg.optionalControlSets); + let controlSets = controlSetUris.map(csUri => { + let cs = propControlSets[csUri]; + return cs; + }); + let strategyNotAssertable = controlSets.map(cs => !(cs.assertable)).reduce( + (previousValue, currentValue) => previousValue || currentValue, + false + ) + return !strategyNotAssertable; + }) + + // TODO: the correctly filtered list of CSGs that we now have is only used here for the count in the panel + // We should be passing the filtered list into the ControlStrategiesPanel for display. As it is, similar filtering code is also implemented in there. + // The necessary Object can be created using Object.fromEntries(csgsAsArray) + + //filter only those that are resolved (enabled) + let csgResolved = csgsAsArray.filter(csg => csg.enabled); + + let nCsgs = csgsAsArray.length; //total number of CGSs + let nCsgResolved = csgResolved.length; //number of CGSs resolved + + // Don't display triggering CSGs panel if there aren't any + if (triggering && nCsgs === 0) { + return null; + } + + //Set default CSG style (triggered threats are opposite colour) + let csgStyle = triggering ? "success" : "danger"; //no CGSs are active + let status = this.props.threatStatus; + + if ((status === "BLOCKED") || (status === "MITIGATED") || triggering) { + if (nCsgResolved === nCsgs) { + csgStyle = triggering ? 
"danger" : "success"; //all CGSs are active + } + else if (nCsgResolved > 0) { + csgStyle = "warning"; //at least one CGS is active + } + } + else if ((status === "ACCEPTED") && !triggering) { + csgStyle = "warning"; + } + + // 7/2/2019: decided that colouring would simply be green here, if one or more CSGs are enabled (see #628) + //let csgsPanelColor = getThreatColor(this.props.threat, this.props.levels["TrustworthinessLevel"]); + let csgsPanelColor; //this just means the style is not used, so we use csgStyle instead below + + let key = triggering ? "triggering-csgs" : "csgs"; + + return ( + + + + {this.renderHeaderNumbers(panelTitle, null, panelTooltip, "threat-ctrl-strat", nCsgResolved, nCsgs)} + + + + + + + + + ) + } + activateAcceptancePanel(arg) { this.props.dispatch(activateAcceptancePanel(arg)); } @@ -508,6 +543,8 @@ ThreatAccordion.propTypes = { controlStrategies: PropTypes.object, controlSets: PropTypes.array, //all controlSets threat: PropTypes.object, + threatStatus: PropTypes.string, + triggeredStatus: PropTypes.string, threats: PropTypes.array, twas: PropTypes.object, selectedMisbehaviour: PropTypes.object, diff --git a/src/main/webapp/app/modeller/components/panes/threats/accordion/panels/ControlStrategiesPanel.js b/src/main/webapp/app/modeller/components/panes/threats/accordion/panels/ControlStrategiesPanel.js index 8dd058fc..3334c184 100644 --- a/src/main/webapp/app/modeller/components/panes/threats/accordion/panels/ControlStrategiesPanel.js +++ b/src/main/webapp/app/modeller/components/panes/threats/accordion/panels/ControlStrategiesPanel.js @@ -1,7 +1,7 @@ import React, {Fragment} from "react"; import PropTypes from 'prop-types'; import {Button, Checkbox, FormControl, FormGroup} from "react-bootstrap"; -import renderControlStrategy from "../../../csgExplorer/ControlStrategyRenderer"; +import {renderControlStrategy} from "../../../csgExplorer/ControlStrategyRenderer"; import {connect} from "react-redux"; class ControlStrategiesPanel extends React.Component { @@ -88,7 +88,7 @@ class ControlStrategiesPanel extends React.Component { const controlStrategy = csgDict[csgName]; return renderControlStrategy(csgName, controlStrategy, index, threat, this.state, this.props, this, "threat-explorer"); })} - {!isComplianceThreat && this.props.authz.userEdit ? 
this.props.toggleAcceptThreat({ acceptThreat: a, reason: b })} /> @@ -103,6 +103,7 @@ class ControlStrategiesPanel extends React.Component { ControlStrategiesPanel.propTypes = { threat: PropTypes.object, asset: PropTypes.object, + triggering: PropTypes.bool, filteredCsgs: PropTypes.array, controlStrategies: PropTypes.object, controlSets: PropTypes.object, diff --git a/src/main/webapp/app/modeller/components/util/Levels.js b/src/main/webapp/app/modeller/components/util/Levels.js index 92a66568..f24a2295 100644 --- a/src/main/webapp/app/modeller/components/util/Levels.js +++ b/src/main/webapp/app/modeller/components/util/Levels.js @@ -2,10 +2,10 @@ import React from "react"; import {OverlayTrigger, Tooltip, FormControl} from "react-bootstrap"; import * as Constants from "../../../common/constants.js"; -export function getRenderedLevelText(levels, level, reverseColours) { +export function getRenderedLevelText(levels, level, reverseColours, emptyLevelTooltip) { if (levels === null || level === undefined || level === null){ - return renderEmptyLevel(); + return renderEmptyLevel(emptyLevelTooltip); } else { let colour = getLevelColour(levels, level, reverseColours); @@ -20,14 +20,16 @@ export function getRenderedLevelText(levels, level, reverseColours) { } -function renderEmptyLevel() { +function renderEmptyLevel(tooltip) { + let tooltipText = tooltip || "Not yet available - please run Risk Calculation"; + return ( - Not yet available - please run Risk Calculation}> + {tooltipText}}> N/A ); diff --git a/src/main/webapp/app/modeller/components/util/LoadingOverlay.js b/src/main/webapp/app/modeller/components/util/LoadingOverlay.js index 569db6de..a5ab2c04 100644 --- a/src/main/webapp/app/modeller/components/util/LoadingOverlay.js +++ b/src/main/webapp/app/modeller/components/util/LoadingOverlay.js @@ -1,11 +1,12 @@ import React from "react"; import PropTypes from "prop-types"; import {Modal, Button, ProgressBar} from "react-bootstrap"; +import AbortRecommendationsModal from "../panes/recommendationsExplorer/AbortRecommendationsModal"; import { - loadingCompleted, pollForLoadingProgress, pollForValidationProgress, pollForRiskCalcProgress, + pollForLoadingProgress, pollForValidationProgress, pollForRiskCalcProgress, pollForRecommendationsProgress, validationCompleted, validationFailed, - riskCalcCompleted, riskCalcFailed, changeSelectedAsset - //resetValidation + riskCalcCompleted, riskCalcFailed, changeSelectedAsset, + recommendationsCompleted, recommendationsFailed, abortRecommendations, } from "../../actions/ModellerActions"; class LoadingOverlay extends React.Component { @@ -33,26 +34,43 @@ class LoadingOverlay extends React.Component { } }, progress: 0, + abortRecommendationsModal: false, }; this.checkProgress = this.checkProgress.bind(this); this.pollValidationProgress = this.pollValidationProgress.bind(this); this.pollRiskCalcProgress = this.pollRiskCalcProgress.bind(this); + this.pollRecommendationsProgress = this.pollRecommendationsProgress.bind(this); this.pollLoadingProgress = this.pollLoadingProgress.bind(this); this.pollDroppingInfGraphProgress = this.pollDroppingInfGraphProgress.bind(this); this.getValidationTimeout = this.getValidationTimeout.bind(this); this.getRiskCalcTimeout = this.getRiskCalcTimeout.bind(this); + this.getRecommendationsTimeout = this.getRecommendationsTimeout.bind(this); this.getLoadingTimeout = this.getLoadingTimeout.bind(this); this.getTimeout = this.getTimeout.bind(this); + this.getHeaderText = this.getHeaderText.bind(this); + this.onKeyPress = 
this.onKeyPress.bind(this); + this.abortRecommendations = this.abortRecommendations.bind(this); + } + onKeyPress(event){ + //Check for Escape key + if (event.keyCode === 27) { + if (this.props.isCalculatingRecommendations) { + this.setState({...this.state, abortRecommendationsModal: true}); + } + } } componentDidMount() { - //console.log("LoadingOverlay timeout settings: ", this.state); + document.addEventListener("keydown", this.onKeyPress, false); + } + + componentWillUnmount() { + document.removeEventListener("keydown", this.onKeyPress, false); } componentWillReceiveProps(nextProps) { - //console.log("LoadingOverlay: componentWillReceiveProps", this.props.isValidating, nextProps.isValidating); let showModal = this.state.showModal; let timeout = this.state.timeout; let progress = this.state.progress; @@ -69,12 +87,18 @@ class LoadingOverlay extends React.Component { // If risk calc has completed, show modal if (this.props.isCalculatingRisks && !nextProps.isCalculatingRisks) { - //console.log("LoadingOverlay: setting showModal true"); stage = "Risk calculation"; if (nextProps.validationProgress.status !== "inactive") showModal = true; stateChanged = true; } + // If recommendations has completed, show modal + if (this.props.isCalculatingRecommendations && !nextProps.isCalculatingRecommendations) { + stage = "Recommendations"; + if (nextProps.validationProgress.status !== "inactive") showModal = true; + stateChanged = true; + } + // Show modal (error dialog) if loading has failed if (this.props.loadingProgress.status !== "failed" && nextProps.loadingProgress.status === "failed") { showModal = true; @@ -89,7 +113,6 @@ class LoadingOverlay extends React.Component { } if (this.props.isDroppingInferredGraph && !nextProps.isDroppingInferredGraph) { - //console.log("Dropped inferred graph - progress complete"); stage = "DroppingInferredGraph"; progress = 1.0; stateChanged = true; @@ -97,25 +120,28 @@ class LoadingOverlay extends React.Component { // If loading has started, start polling if (!this.props.isLoading && nextProps.isLoading) { - //console.log("LoadingOverlay: loading started. Initialising timeout"); stage = "Loading"; timeout = 0; stateChanged = true; } // If validation has started, start polling else if (!this.props.isValidating && nextProps.isValidating) { - //console.log("LoadingOverlay: validation started. Initialising timeout"); stage = "Validation"; timeout = 0; stateChanged = true; } // If risk calc has started, start polling else if (!this.props.isCalculatingRisks && nextProps.isCalculatingRisks) { - console.log("LoadingOverlay: risk calc started. 
Initialising timeout"); stage = "Risk calculation"; timeout = 0; stateChanged = true; } + // If recommendations has started, start polling + else if (!this.props.isCalculatingRecommendations && nextProps.isCalculatingRecommendations) { + stage = "Recommendations"; + timeout = 0; + stateChanged = true; + } if (stateChanged) { //here, we set both changes of state at the same time, otherwise initial change may be ignored @@ -130,9 +156,6 @@ class LoadingOverlay extends React.Component { } componentDidUpdate(prevProps, prevState) { - //console.log("LoadingOverlay: componentDidUpdate: state: ", prevState, this.state); - //console.log("LoadingOverlay: showModal = " + this.state.showModal); - // If loading has started, start polling if (!prevProps.isLoading && this.props.isLoading) { if (this.props.loadingId) { @@ -148,13 +171,18 @@ class LoadingOverlay extends React.Component { this.checkProgress(); } else if (!prevProps.isValidating && this.props.isValidating) { - //console.log("LoadingOverlay: validation started. Start polling"); this.checkProgress(); } else if (!prevProps.isCalculatingRisks && this.props.isCalculatingRisks) { - console.log("LoadingOverlay: risk calc started. Start polling"); this.checkProgress(); } + else if (!prevProps.isCalculatingRecommendations && this.props.isCalculatingRecommendations) { + this.checkProgress(); + } + else if (prevProps.isCalculatingRecommendations && !this.props.isCalculatingRecommendations) { + console.log("Recommendations finished - closing abort dialog..."); + this.setState({...this.state, abortRecommendationsModal: false}); + } else if (prevProps.loadingProgress.waitingForUpdate && !this.props.loadingProgress.waitingForUpdate) { this.checkProgress(); } @@ -164,9 +192,6 @@ class LoadingOverlay extends React.Component { else if (this.props.isDroppingInferredGraph) { this.checkProgress(); } - else { - //console.log("LoadingOverlay: polling componentDidUpdate (nothing to do)"); - } } getValidationTimeout() { @@ -183,6 +208,10 @@ class LoadingOverlay extends React.Component { return this.getTimeout(min, max, progress); } + getRecommendationsTimeout() { + return this.getRiskCalcTimeout(); + } + getLoadingTimeout() { let min = this.state.bounds.loading.min; let max = this.state.bounds.loading.max; @@ -194,12 +223,6 @@ class LoadingOverlay extends React.Component { let timeout = this.state.timeout; let increment = this.state.increment; - //console.log("current timeout: ", timeout); - //console.log("min: ", min); - //console.log("max: ", max); - //console.log("increment: ", increment); - //console.log("progress: ", progress); - // increment timeout initially, then decrement towards end if (progress < 0.4) { timeout += increment; @@ -208,13 +231,9 @@ class LoadingOverlay extends React.Component { timeout -= increment; } - //console.log("provisional timeout (before bounds check): ", timeout); - // check within min/max bounds timeout = (timeout < min) ? min : (timeout > max) ? 
max : timeout; - //console.log("LoadingOverlay: setting timeout: ", timeout); - //update state this.setState({...this.state, timeout: timeout}); @@ -224,7 +243,6 @@ class LoadingOverlay extends React.Component { checkProgress() { if (this.props.isValidating) { - //console.log("LoadingOverlay: validation progress: ", Math.round(this.props.validationProgress.progress * 100)); if (this.props.validationProgress.progress >= 1.0) { if (this.props.validationProgress.status === "completed") { console.log("LoadingOverlay: validation progress completed"); @@ -239,8 +257,6 @@ class LoadingOverlay extends React.Component { } } else if (this.props.isCalculatingRisks) { - console.log(this.props); - console.log("LoadingOverlay: risk calc progress: ", Math.round(this.props.validationProgress.progress * 100)); if (this.props.validationProgress.status === "inactive") { console.log("WARNING: isCalculatingRisks is true, but status is inactive"); this.props.dispatch(riskCalcFailed(this.props.modelId)); @@ -263,13 +279,34 @@ class LoadingOverlay extends React.Component { setTimeout(this.pollRiskCalcProgress, this.getRiskCalcTimeout()); } } + else if (this.props.isCalculatingRecommendations) { + if (this.props.validationProgress.status === "inactive") { + console.log("WARNING: isCalculatingRecommendations is true, but status is inactive"); + this.props.dispatch(recommendationsFailed(this.props.modelId)); + return; + } + else if (this.props.validationProgress.progress >= 1.0) { + if (this.props.validationProgress.status === "completed") { + console.log("LoadingOverlay: recommendations progress completed"); + this.props.dispatch(recommendationsCompleted(this.props.modelId, this.props.recommendationsJobId)); + } + else if (this.props.validationProgress.status === "failed") { + console.warn("LoadingOverlay: recommendations progress failed"); + this.props.dispatch(recommendationsFailed(this.props.modelId)); + } + else { + //This should not be necessary, but if server has not yet set completed state.. + setTimeout(this.pollRecommendationsProgress, this.getRecommendationsTimeout()); + } + } else { + setTimeout(this.pollRecommendationsProgress, this.getRecommendationsTimeout()); + } + } else if (this.props.isLoading) { - //console.log("LoadingOverlay: loading progress: ", Math.round(this.props.loadingProgress.progress * 100)); if (this.props.loadingProgress.progress >= 1.0) { //console.log("LoadingOverlay: loading progress completed"); } else { let timeout = this.getLoadingTimeout(); - //console.log("Calling setTimeout: " + timeout); setTimeout(this.pollLoadingProgress, timeout); } } @@ -295,8 +332,18 @@ class LoadingOverlay extends React.Component { } } + pollRecommendationsProgress() { + // While synchronous recommendations is running, the isCalculatingRecommendations flag is true, so poll for progress + // Once the recommendations call returns, the flag is set to false, so we avoid an unnecessary progress request below + if (this.props.isCalculatingRecommendations) { + this.props.dispatch(pollForRecommendationsProgress(this.props.modelId)); + } + else { + console.log("Recommendations complete. 
Cancel polling for progress"); + } + } + pollLoadingProgress() { - //console.log("LoadingOverlay: pollLoadingProgress: this.props.loadingId = " + this.props.loadingId); if (this.props.loadingId) { this.props.dispatch(pollForLoadingProgress($("meta[name='_model']").attr("content"), this.props.loadingId)); } @@ -314,19 +361,41 @@ class LoadingOverlay extends React.Component { } } + abortRecommendations(modelId, jobId) { + this.setState({...this.state, abortRecommendationsModal: false}); + this.props.dispatch(abortRecommendations(modelId, jobId)); + this.pollRecommendationsProgress(); + } + + getHeaderText() { + let header = ""; + + if (this.props.isValidating) { + header = "The model is currently validating"; + } + else if (this.props.isCalculatingRisks) { + header = "Calculating risks"; + } + else if (this.props.isCalculatingRecommendations) { + header = "Calculating recommendations (Esc to cancel)"; + } + + return header; + } + render() { let isCalculatingRisks = this.props.isCalculatingRisks && this.props.validationProgress.status !== "inactive"; - //console.log("isCalculatingRisks:", isCalculatingRisks); + let isCalculatingRecommendations = this.props.isCalculatingRecommendations && this.props.validationProgress.status !== "inactive" && !this.state.abortRecommendationsModal; let isDroppingInferredGraph = this.props.isDroppingInferredGraph; - //console.log("isDroppingInferredGraph", this.props.isDroppingInferredGraph); - var clazz = "loading-overlay " + (this.props.isValidating || isCalculatingRisks || isDroppingInferredGraph || this.props.isLoading ? "visible" : "invisible"); + let clazz = "loading-overlay " + (this.props.isValidating || isCalculatingRisks || isCalculatingRecommendations || isDroppingInferredGraph || this.props.isLoading ? "visible" : "invisible"); let stage = this.state.stage; + let headerText = this.getHeaderText(); return (
- {(this.props.isValidating || isCalculatingRisks) &&
+ {(this.props.isValidating || isCalculatingRisks || isCalculatingRecommendations) &&
- {this.props.isValidating ? "The model is currently validating" : "Calculating risks"}...
+ {headerText}...
{this.props.validationProgress.message}...
@@ -417,6 +486,9 @@ class LoadingOverlay extends React.Component { + + this.setState({...this.state, abortRecommendationsModal: false})}/>
) } @@ -426,10 +498,12 @@ LoadingOverlay.propTypes = { modelId: PropTypes.string, loadingId: PropTypes.string, isValidating: PropTypes.bool, + isCalculatingRisks: PropTypes.bool, + isCalculatingRecommendations: PropTypes.bool, + recommendationsJobId: PropTypes.string, isValid: PropTypes.bool, hasModellingErrors: PropTypes.bool, validationProgress: PropTypes.object, - isCalculatingRisks: PropTypes.bool, isLoading: PropTypes.bool, isDroppingInferredGraph: PropTypes.bool, loadingProgress: PropTypes.object, diff --git a/src/main/webapp/app/modeller/index.scss b/src/main/webapp/app/modeller/index.scss index 5a441c3c..550ae73a 100644 --- a/src/main/webapp/app/modeller/index.scss +++ b/src/main/webapp/app/modeller/index.scss @@ -255,10 +255,11 @@ body { .misbehaviour-explorer, .control-explorer, .control-strategy-explorer, +.recommendations-explorer, .compliance-explorer, +.explorer, .report-dialog { border: 1px inset $BLUE; - // border-radius: 0.3em; box-shadow: 0 0 2px 2px rgba(0, 0, 0, 0.3); background-color: $PANEL_BACKGROUND_COLOUR; z-index: 1065; @@ -266,6 +267,16 @@ body { flex-direction: column; height: 100%; width: 100%; + button.header { + border-bottom-width: 0px; + border-left-width: 0px; + border-right-width: 0px; + border-top-width: 0px; + padding-top: 0px; + padding-bottom: 0px; + padding-right: 0px; + padding-left: 0px; + } .header { cursor: all-scroll; width: 100%; @@ -273,12 +284,15 @@ body { background: $DARK_BLUE; color: $PANEL_BACKGROUND_COLOUR; padding: 0.3em 0em; + .title { + padding-top: 4px; + } h1 { margin: 0 0.3em; font-size: 14px; line-height: 20px; font-weight: 600; - display: inline-block; + float: left; } .menu-close { cursor: pointer; @@ -286,6 +300,27 @@ body { margin: 1px 0.3em 0 0.3em; float: right; } + .button { + cursor: pointer; + font-size: 18px; + margin: 0px; + float: right; + } + button { + border-top-width: 0px; + border-right-width: 0px; + border-left-width: 0px; + border-bottom-width: 0px; + padding-top: 0px; + padding-bottom: 0px; + background-color: transparent + } + button, i { + color: $PANEL_BACKGROUND_COLOUR; + } + } + .header-no-padding { + padding: 0; } .content { cursor: default; @@ -315,48 +350,27 @@ body { div.well.well-sm { &.strategy { padding: 5px; - background-color: #bcdbe4; - border: 1px ridge #bcdbe4; + background-color: #d5edf2; + border: 1px ridge #d5edf2; margin: 0; overflow: hidden; - .control { - width: 100%; - background-color: #1962c0; - border: 1px ridge #1962c0; - color: #fff; - margin: 0; - overflow: hidden; - padding: 2px; - display: inline-flex; - align-items: center; - - .control-description { - margin-left: 5px; - font-weight: 300; - color: white; - } - - &[disabled] { - .control-description { - color: darken(#fff, 30); - cursor: not-allowed; - } - } - } .optional { background-color: #3d8ed4; } &.enabled { - background-color: #a1e499; - border: 1px ridge #bcdbe4; + background-color: #03b5fb; + border: 1px ridge #03b5fb; margin: 0; overflow: hidden; &.blocked { - background-color: #6ee461; + background-color: #a1e499; } &.mitigated { - background-color: #a2ee54; + background-color: #a1e499; + } + &.triggered { + background-color: #f1d96b; } } } @@ -399,6 +413,80 @@ body { } } +.threat { + color: black; + .clickable { + color: black; + } + .clickable:hover { + color: black; + } + &.blocked { + background-color: #a1e499; + } + &.mitigated { + background-color: #a1e499; + } + &.triggered { + background-color: #f1d96b; + } + &.untriggered { + background-color: #e4e4e4; + } + &.normal { + background-color: #6ed1f8; + } + 
&.unmanaged { + background-color: #fea9a9; + } +} + +.threat:hover { + &.blocked { + background-color: darken(#a1e499, 10%); + } + &.mitigated { + background-color: darken(#a1e499, 10%); + } + &.triggered { + background-color: darken(#f1d96b, 10%); + } + &.untriggered { + background-color: darken(#e4e4e4, 10%); + } + &.normal { + background-color: darken(#6ed1f8, 10%); + } + &.unmanaged { + background-color: darken(#fea9a9, 10%); + } +} + +.control { + width: 100%; + background-color: #1962c0; + border: 1px ridge #1962c0; + color: #fff; + margin: 0; + overflow: hidden; + padding: 2px; + display: inline-flex; + align-items: center; + + .control-description { + margin-left: 5px; + font-weight: 300; + color: white; + } + + &[disabled] { + .control-description { + color: darken(#fff, 30); + cursor: not-allowed; + } + } +} + .traffic-lights { align-items: center; @@ -818,6 +906,9 @@ select.level + .fa-spinner { .clickable { color: #337ab7; cursor: pointer; + &.proposed { + font-weight: bold; + } } .clickable:hover { @@ -2021,3 +2112,7 @@ div.panel-title { margin: 2px 0; padding: 2px 0; } + +.recommendations .bare-list { + font-size: 14px; +} diff --git a/src/main/webapp/app/modeller/modellerConstants.js b/src/main/webapp/app/modeller/modellerConstants.js index c3f164ae..865356e9 100644 --- a/src/main/webapp/app/modeller/modellerConstants.js +++ b/src/main/webapp/app/modeller/modellerConstants.js @@ -52,9 +52,11 @@ export const CLOSE_MISBEHAVIOUR_EXPLORER = "CLOSE_MISBEHAVIOUR_EXPLORER"; export const OPEN_COMPLIANCE_EXPLORER = "OPEN_COMPLIANCE_EXPLORER"; export const OPEN_CONTROL_EXPLORER = "OPEN_CONTROL_EXPLORER"; export const OPEN_CONTROL_STRATEGY_EXPLORER = "OPEN_CONTROL_STRATEGY_EXPLORER"; +export const OPEN_RECOMMENDATIONS_EXPLORER = "OPEN_RECOMMENDATIONS_EXPLORER"; export const CLOSE_COMPLIANCE_EXPLORER = "CLOSE_COMPLIANCE_EXPLORER"; export const CLOSE_CONTROL_EXPLORER = "CLOSE_CONTROL_EXPLORER"; export const CLOSE_CONTROL_STRATEGY_EXPLORER = "CLOSE_CONTROL_STRATEGY_EXPLORER"; +export const CLOSE_RECOMMENDATIONS_EXPLORER = "CLOSE_RECOMMENDATIONS_EXPLORER"; export const SUPPRESS_CANVAS_REFRESH = "SUPPRESS_CANVAS_REFRESH"; export const REDRAW_RELATIONS = "REDRAW_RELATIONS"; export const HOVER_THREAT = "HOVER_THREAT"; @@ -66,15 +68,21 @@ export const IS_VALIDATING = "IS_VALIDATING"; export const IS_NOT_VALIDATING = "IS_NOT_VALIDATING"; export const IS_CALCULATING_RISKS = "IS_CALCULATING_RISKS"; export const IS_NOT_CALCULATING_RISKS = "IS_NOT_CALCULATING_RISKS"; +export const IS_CALCULATING_RECOMMENDATIONS = "IS_CALCULATING_RECOMMENDATIONS"; +export const IS_NOT_CALCULATING_RECOMMENDATIONS = "IS_NOT_CALCULATING_RECOMMENDATIONS"; export const IS_DROPPING_INFERRED_GRAPH = "IS_DROPPING_INFERRED_GRAPH"; export const IS_NOT_DROPPING_INFERRED_GRAPH = "IS_NOT_DROPPING_INFERRED_GRAPH"; export const RISK_CALC_RESULTS = "RISK_CALC_RESULTS"; +export const RECOMMENDATIONS_JOB_STARTED = "RECOMMENDATIONS_JOB_STARTED"; +export const RECOMMENDATIONS_RESULTS = "RECOMMENDATIONS_RESULTS"; export const VALIDATION_FAILED = "VALIDATION_FAILED"; export const RISK_CALC_FAILED = "RISK_CALC_FAILED"; +export const RECOMMENDATIONS_FAILED = "RECOMMENDATIONS_FAILED"; export const RESOLVE_RELATION_ISSUE = "RESOLVE_RELATION_ISSUE"; export const GET_ISSUES = "GET_ISSUES"; export const UPDATE_VALIDATION_PROGRESS = "UPDATE_VALIDATION_PROGRESS"; export const UPDATE_RISK_CALC_PROGRESS = "UPDATE_RISK_CALC_PROGRESS"; +export const UPDATE_RECOMMENDATIONS_PROGRESS = "UPDATE_RECOMMENDATIONS_PROGRESS"; export const 
UPDATE_LOADING_PROGRESS = "UPDATE_LOADING_PROGRESS"; export const UPDATE_THREAT_LOADING = "UPDATE_THREAT_LOADING"; export const UPDATE_DETAILS_LOADING = "UPDATE_DETAILS_LOADING"; diff --git a/src/main/webapp/app/modeller/reducers/modeller.js b/src/main/webapp/app/modeller/reducers/modeller.js index a6a6bc81..5c793c07 100644 --- a/src/main/webapp/app/modeller/reducers/modeller.js +++ b/src/main/webapp/app/modeller/reducers/modeller.js @@ -26,12 +26,15 @@ const modelState = { //riskLevelsValid: true, //don't set this initially (button will be coloured blue) saved: true, calculatingRisks: false, + calculatingRecommendations: false, controlsReset: false, canBeEdited: true, canBeShared: true, risksValid: false, riskCalculationMode: "" }, + recommendationsJobId: null, + recommendations: {}, // Rayna: TODO - when the backend for groups is implemented, put this array in the model above. groups: [], grouping: { @@ -81,6 +84,7 @@ const modelState = { } }, misbehaviourTwas: {}, + csgAssets: {}, isMisbehaviourExplorerVisible: false, isMisbehaviourExplorerActive: false, isComplianceExplorerVisible: false, @@ -89,6 +93,8 @@ const modelState = { isControlExplorerActive: false, isControlStrategyExplorerVisible: false, isControlStrategyExplorerActive: false, + isRecommendationsExplorerVisible: false, + isRecommendationsExplorerActive: false, isReportDialogVisible: false, isReportDialogActive: false, isDroppingInferredGraph: false, @@ -204,6 +210,7 @@ export default function modeller(state = modelState, action) { let model = action.payload; model.saved = true; //must be true if reloaded + model.calculatingRecommendations = false; //flag is not currently returned in model let groups = model.groups; @@ -238,6 +245,9 @@ export default function modeller(state = modelState, action) { model.threats.forEach(threat => setThreatTriggeredStatus(threat, model.controlStrategies)); } + //Get map of CSGs to asset + let csgAssets = getCsgAssets(model.threats); + return { ...state, model: model, @@ -250,7 +260,8 @@ export default function modeller(state = modelState, action) { ...state.selectedMisbehaviour, misbehaviour: misbehaviour, }, - misbehaviourTwas: misbehaviourTwas + misbehaviourTwas: misbehaviourTwas, + csgAssets: csgAssets, }; } @@ -315,7 +326,6 @@ export default function modeller(state = modelState, action) { if (action.type === instr.EDIT_MODEL) { let updatedModel = action.payload; - //console.log("EDIT_MODEL:", updatedModel); return { ...state, model: { @@ -327,86 +337,19 @@ export default function modeller(state = modelState, action) { } if (action.type === instr.UPDATE_VALIDATION_PROGRESS) { - if (action.payload.waitingForUpdate) { - //console.log("poll: UPDATE_VALIDATION_PROGRESS: (waiting for progress)"); - return { - ...state, validationProgress: { - ...state.validationProgress, - waitingForUpdate: action.payload.waitingForUpdate - } - }; - } - - let status = "running"; - - if (action.payload.status) { - status = action.payload.status; - } - else if (action.payload.message.indexOf("failed") != -1) { - console.log("Validation failed (detected from message)"); - status = "failed"; - } - else if (action.payload.message.indexOf("complete") != -1) { - console.log("Validation completed (detected from message)"); - status = "completed"; - } - - let error = action.payload.error != null ? 
action.payload.error : ""; - - return { - ...state, validationProgress: { - status: status, - progress: action.payload.progress, - message: action.payload.message, - error: error, - waitingForUpdate: action.payload.waitingForUpdate - } - }; + return updateProgress("Validation", state, action); } if (action.type === instr.UPDATE_RISK_CALC_PROGRESS) { - //console.log("UPDATE_RISK_CALC_PROGRESS", action.payload); - if (action.payload.waitingForUpdate) { - //console.log("poll: UPDATE_RISK_CALC_PROGRESS: (waiting for progress)"); - return { - ...state, validationProgress: { - ...state.validationProgress, - waitingForUpdate: action.payload.waitingForUpdate - } - }; - } - - let status = "running"; - - if (action.payload.status) { - status = action.payload.status; - } - else if (action.payload.message.indexOf("failed") != -1) { - console.log("Risk calc failed (detected from message)"); - status = "failed"; - } - else if (action.payload.message.indexOf("complete") != -1) { - console.log("Risk calc completed (detected from message)"); - status = "completed"; - } - - let error = action.payload.error != null ? action.payload.error : ""; + return updateProgress("Risk calc", state, action); + } - return { - ...state, validationProgress: { - status: status, - progress: action.payload.progress, - message: action.payload.message, - error: error, - waitingForUpdate: action.payload.waitingForUpdate - } - }; + if (action.type === instr.UPDATE_RECOMMENDATIONS_PROGRESS) { + return updateProgress("Recommendations", state, action); } if (action.type === instr.UPDATE_LOADING_PROGRESS) { - //console.log("UPDATE_LOADING_PROGRESS:", action.payload); if (action.payload.waitingForUpdate) { - //console.log("poll: UPDATE_LOADING_PROGRESS: (waiting for progress)"); return { ...state, loadingProgress: { ...state.loadingProgress, @@ -855,9 +798,6 @@ export default function modeller(state = modelState, action) { if (action.type === instr.IS_VALIDATING) { - - console.log("modellerReducer: model is validating"); - return { ...state, model: { @@ -877,9 +817,6 @@ } if (action.type === instr.IS_NOT_VALIDATING) { - - console.log("modellerReducer: model is not validating"); - return { ...state, model: { @@ -897,7 +834,6 @@ } if (action.type === instr.VALIDATION_FAILED) { - console.log("modellerReducer: validation failed"); return { @@ -910,7 +846,6 @@ } if (action.type === instr.RISK_CALC_FAILED) { - console.log("modellerReducer: risk calc failed"); return { @@ -922,10 +857,19 @@ }; } - if (action.type === instr.IS_CALCULATING_RISKS) { + if (action.type === instr.RECOMMENDATIONS_FAILED) { + console.log("modellerReducer: recommendations failed"); - console.log("modellerReducer: calculating risks for model"); + return { + ...state, + model: { + ...state.model, + calculatingRecommendations: false + }, + }; + } + if (action.type === instr.IS_CALCULATING_RISKS) { return { ...state, model: { @@ -944,9 +888,6 @@ } if (action.type === instr.IS_NOT_CALCULATING_RISKS) { - - console.log("modellerReducer: not calculating risks for model"); - return { ...state, model: { @@ -1133,6 +1074,60 @@ } + if (action.type === instr.IS_CALCULATING_RECOMMENDATIONS) { + return { + 
...state, + model: { + ...state.model, + calculatingRecommendations: true + }, + recommendations: null, //clear previous results + validationProgress: { + status: "starting", + progress: 0.0, + message: "Starting calculation", + error: "", + waitingForUpdate: false + } + }; + } + + if (action.type === instr.IS_NOT_CALCULATING_RECOMMENDATIONS) { + return { + ...state, + model: { + ...state.model, + calculatingRecommendations: false + }, + validationProgress: { + status: "inactive", + progress: 0.0, + message: "", + error: "", + waitingForUpdate: false + }, + }; + } + + if (action.type === instr.RECOMMENDATIONS_JOB_STARTED) { + console.log("Recommendations job started: ", action.payload); + let jobId = action.payload.jobId; + return { + ...state, + recommendationsJobId: jobId + }; + } + + if (action.type === instr.RECOMMENDATIONS_RESULTS) { + let recommendations = action.payload; + return { + ...state, + recommendations: recommendations, + isRecommendationsExplorerVisible: true, + isRecommendationsExplorerActive: true, + }; + } + if (action.type === instr.IS_DROPPING_INFERRED_GRAPH) { console.log("modellerReducer: dropping inferred graph for model"); @@ -1651,6 +1646,22 @@ export default function modeller(state = modelState, action) { }; } + if (action.type === instr.OPEN_RECOMMENDATIONS_EXPLORER) { + return { + ...state, + isRecommendationsExplorerVisible: true, + isRecommendationsExplorerActive: true, + }; + } + + if (action.type === instr.CLOSE_RECOMMENDATIONS_EXPLORER) { + return { + ...state, + isRecommendationsExplorerVisible: false, + isRecommendationsExplorerActive: false, + }; + } + if (action.type === instr.OPEN_REPORT_DIALOG) { console.log("OPEN_REPORT_DIALOG"); return { @@ -2303,6 +2314,43 @@ export default function modeller(state = modelState, action) { return state; } +function updateProgress(task, state, action) { + if (action.payload.waitingForUpdate) { + return { + ...state, validationProgress: { + ...state.validationProgress, + waitingForUpdate: action.payload.waitingForUpdate + } + }; + } + + let status = "running"; + + if (action.payload.status) { + status = action.payload.status; + } + else if (action.payload.message.indexOf("failed") != -1) { + console.log(task + " failed (detected from message)"); + status = "failed"; + } + else if (action.payload.message.indexOf("complete") != -1) { + console.log(task + " completed (detected from message)"); + status = "completed"; + } + + let error = action.payload.error != null ? 
action.payload.error : ""; + + return { + ...state, validationProgress: { + status: status, + progress: action.payload.progress, + message: action.payload.message, + error: error, + waitingForUpdate: action.payload.waitingForUpdate + } + }; +} + function getAttackPathThreatRefs(attackPathData) { const prefix = attackPathData.prefix; const sortedAttackThreats = Array.from(new Map(Object.entries(attackPathData.threats))) @@ -2311,6 +2359,21 @@ return sortedAttackThreats; } +//Generate map of Control Strategies to their associated asset +function getCsgAssets(threats) { + let csgAssets = {}; + + threats.forEach(threat => { + let assetUri = threat.threatensAssets; + let threatCsgs = Object.keys(threat.controlStrategies); + threatCsgs.forEach(threatCsg => { + csgAssets[threatCsg] = assetUri; + }); + }); + + return csgAssets; +} + function updateControlStrategies(threats, controlStrategies, controlSets) { let controlSetsMap = {}; controlSets.forEach(cs => { @@ -2334,20 +2397,22 @@ return cs; }); + //Get specific CSG type for this threat + let csgType = csg.threatCsgTypes[threat.uri]; + //CSG is enabled if there are no mandatory controls, or if one of them is not proposed csg.enabled = (mandatoryControlSets.length == 0) ? true : mandatoryControlSets.find((control) => !control["proposed"]) === undefined; - if (csg.type !== "TRIGGER" && csg.enabled) { + if (csgType !== "TRIGGER" && csg.enabled) { threat.resolved = true; //if any non-triggering CSGs are enabled, the threat is resolved } - else if (csg.type === "TRIGGER" && csg.enabled) { + else if (csgType === "TRIGGER" && csg.enabled) { triggered = true; //if any triggering CSGs are enabled, the threat is triggered } }); // Finally, if triggered state has changed, update the threat if (threat.triggered !== triggered) { - //console.log("threat " + (triggered ? 
"triggered" : "untriggered") + ": " + threat.label); threat.triggered = triggered; } }); diff --git a/src/main/webapp/app/modeller/reducers/view.js b/src/main/webapp/app/modeller/reducers/view.js index 016eae22..b0f7c543 100644 --- a/src/main/webapp/app/modeller/reducers/view.js +++ b/src/main/webapp/app/modeller/reducers/view.js @@ -1,11 +1,12 @@ import * as instr from "../modellerConstants"; -var _ = require('lodash'); +let _ = require('lodash'); let defaultState = { windowOrder: [ { 'name': 'controlExplorer', order: 1065 }, { 'name': 'controlStrategyExplorer', order: 1065 }, + { 'name': 'recommendationsExplorer', order: 1065 }, { 'name': 'misbehaviourExplorer', order: 1065 }, { 'name': 'complianceExplorer', order: 1065 }, { 'name': 'reportDialog', order: 1065 }, @@ -17,14 +18,13 @@ let defaultState = { 'misbehaviourExplorer': 1065, 'controlExplorer': 1065, 'controlStrategyExplorer': 1065, + 'recommendationsExplorer': 1065, }; -var highestWindowOrder = 1074; -var hiddenWindowOrder = 1065; - +let highestWindowOrder = 1074; +let hiddenWindowOrder = 1065; export default function view(state=defaultState, action) { - //console.log("view:", state, action); if (action.type === instr.OPEN_WINDOW) { let newWindowOrder = []; let newWindowObjects = {}; diff --git a/src/main/webapp/build.gradle b/src/main/webapp/build.gradle index b5949db1..423b588c 100644 --- a/src/main/webapp/build.gradle +++ b/src/main/webapp/build.gradle @@ -12,7 +12,7 @@ version '0.0.1' buildDir = 'dist' node { - version = '12.13.0' + version = '14.21.3' download = true yarnVersion = '1.21.1' } diff --git a/src/main/webapp/package.json b/src/main/webapp/package.json index 00e61b6d..3b94bcee 100644 --- a/src/main/webapp/package.json +++ b/src/main/webapp/package.json @@ -10,7 +10,7 @@ }, "dependencies": { "@babel/runtime": "^7.16.5", - "axios": "^0.21.3", + "axios": "^0.28.0", "classnames": "^2.2.6", "es6-promise": "^4.2.4", "immutable": "^4.0.0-rc.12", @@ -24,6 +24,7 @@ "react-contextmenu": "2.11.0", "react-dom": "16.13.1", "react-hot-loader": "4.13.1", + "react-json-view-lite": "^1.2.1", "react-portal": "4.1.5", "react-redux": "5.0.7", "react-rnd": "^10.3.4", @@ -49,7 +50,7 @@ "babel-loader": "^8.2.3", "connect-history-api-fallback": "^1.5.0", "css-loader": "^6.5.1", - "express": "^4.17.2", + "express": "^4.19.2", "file-loader": "^6.2.0", "html-webpack-plugin": "^5.5.0", "http-proxy-middleware": "^2.0.6", @@ -61,7 +62,7 @@ "style-loader": "^3.3.1", "webpack": "^5.65.0", "webpack-cli": "^4.9.1", - "webpack-dev-middleware": "^5.3.0", + "webpack-dev-middleware": "^5.3.4", "webpack-hot-middleware": "^2.25.1" } } diff --git a/src/main/webapp/webpack/webpack.dev.config.js b/src/main/webapp/webpack/webpack.dev.config.js index 748edee5..e14e2565 100644 --- a/src/main/webapp/webpack/webpack.dev.config.js +++ b/src/main/webapp/webpack/webpack.dev.config.js @@ -1,11 +1,12 @@ -var webpack = require("webpack"); -var HtmlWebpackPlugin = require("html-webpack-plugin"); -var path = require("path"); -var autoprefixer = require("autoprefixer"); +const webpack = require("webpack"); +const HtmlWebpackPlugin = require("html-webpack-plugin"); +const path = require("path"); +const autoprefixer = require("autoprefixer"); -var ROOT = path.resolve(__dirname, "../"); -var SRC = path.resolve(ROOT, "app"); -var BUILD = path.join(ROOT, "dist"); +const ROOT = path.resolve(__dirname, "../"); +const SRC = path.resolve(ROOT, "app"); +const NODE_MODULES = path.resolve(ROOT, "node_modules"); +const BUILD = path.join(ROOT, "dist"); module.exports = { mode: 
'development', @@ -40,7 +41,7 @@ module.exports = { ] }, { - test: /\.(sc|c)ss$/, + test: /\.scss$/, include: SRC, exclude: /node_modules/, use: [ @@ -63,6 +64,21 @@ module.exports = { loader: "sass-loader" // compiles SASS to CSS } ], + }, + { + test: /\.css$/, + include: [SRC, NODE_MODULES], + use: [ + { + loader: 'style-loader', // creates