diff --git a/_config.yml b/_config.yml
index 0242a60636e..ddb9c1723cb 100644
--- a/_config.yml
+++ b/_config.yml
@@ -7,13 +7,13 @@
 # Site Settings
 locale : "en-US"
-title : "Rob Williams"
+title : "About me, Click Here"
 title_separator : "-"
-name : &name "Rob Williams"
+name : &name "Nkereiso I Godswill"
 description : &description "Postdoc in Political Science"
-url : https://jayrobwilliams.com # the base hostname & protocol for your site e.g. "https://mmistakes.github.io"
+url : https://godswill-ikot.github.io # the base hostname & protocol for your site e.g. "https://mmistakes.github.io"
 baseurl : "" # the subpath of your site, e.g. "/blog"
-repository : "jayrobwilliams/jayrobwilliams.github.io"
+repository : "godswill-ikot/godswill-ikot.github.io"
 teaser : # filename of teaser fallback teaser image placed in /images/, .e.g. "500x300.png"
 breadcrumbs : false # true, false (default)
 words_per_minute : 160
@@ -79,20 +79,20 @@ analytics:
   google:
     tracking_id :
   goatcounter:
-    code : jayrobwilliams
+    code : godswill-ikot

 # Site Author
 author:
-  name : "Rob Williams"
+  name : "Nkereiso I Godswill"
   avatar : "profile.png"
-  bio : "Data Scientist"
+  bio : "Cybersecurity Analyst"
   location :
   employer :
   pubmed :
-  orcid : "http://orcid.org/0000-0001-9259-3883"
-  googlescholar : "https://scholar.google.com/citations?user=fiaPSmgAAAAJ"
-  email : "rob.williams@wustl.edu"
+  orcid : #"http://orcid.org/0000-0001-9259-3883"
+  googlescholar : #"https://scholar.google.com/citations?user=fiaPSmgAAAAJ"
+  email : "godswill08030444795@gmail.com"
   researchgate : # example: "https://www.researchgate.net/profile/yourprofile"
   uri :
   bitbucket :
@@ -101,15 +101,15 @@ author:
   flickr :
   facebook :
   foursquare :
-  github : "jayrobwilliams"
-  gitlab : "jayrobwilliams"
+  github : "godswill-ikot"
+  gitlab : "godswill-ikot"
   google_plus :
   keybase :
   instagram :
   impactstory : #"https://profiles.impactstory.org/u/xxxx-xxxx-xxxx-xxxx"
   lastfm :
   linkedin :
-  mastodon : "fosstodon.org/@jayrobw"
+  mastodon : #"fosstodon.org/@jayrobw"
   pinterest :
   soundcloud :
   stackoverflow : #"https://stackoverflow.com/users/10912314/jayrobwilliams"
diff --git a/_pages/about.md b/_pages/about.md
index 9ff36b973e2..96ed77cd0ec 100644
--- a/_pages/about.md
+++ b/_pages/about.md
@@ -8,21 +8,18 @@ redirect_from:
   - /about.html
 ---

-Welcome! I am a data scientist applying machine learning tools and causal
-inference techniques to remote sensing data. I am an affiliated
-researcher with the [Data-driven Analysis of Peace Project](https://dapp-lab.org)
-and a research collaborator with the
-[Research on International Policy Implementation Lab](https://bridgingthegapproject.org/ripil).
+Welcome! I am a passionate and dedicated cybersecurity professional who continually learns and applies security best practices, principles, and frameworks to secure assets. With a strong foundation in information security principles and in risk management and compliance frameworks such as ISO 27001, COBIT, GDPR, ITIL, and HIPAA for protecting critical data and systems, I am eager to contribute to building safer and more resilient digital environments.

-I earned my PhD in Political Science from the
-[University *of* North Carolina *at* Chapel Hill](https://www.unc.edu) and my
-BA in Political Science from [Haverford College](https://www.haverford.edu).
-My academic work has been [published](publications) in the
-*American Political Science Review*, *International Studies Quarterly*,
-*Conflict Management and Peace Science*, and
-*Political Science Research and Methods*, among other outlets. This
-[research](research) explores the causes and consequences of political violence
-using a broad variety of methods such as latent variable models, geospatial
-analysis, and big data. I have [taught](teaching) quantitative methodology and
-international relations, and am a certified instructor with
-[The Carpentries](https://carpentries.org).
\ No newline at end of file
+My expertise lies in building frameworks with an organisation's available resources, in compliance with industry standards, to help identify vulnerabilities, mitigate risks, and implement robust security measures that protect sensitive data in an ever-evolving threat landscape.
+With hands-on lab experience in network security, threat analysis, and incident response, I thrive in environments that demand critical thinking and quick decision-making. I am proficient in leveraging tools such as SIEM systems, firewalls, Wireshark, Nessus, Metasploit, and other vulnerability scanners, alongside scripting languages such as Python and Bash, to detect and neutralize security threats effectively, and I am also proficient in using MySQL for database queries.
+
+I earned my MSc in Cybersecurity from the
+[University of Roehampton, London](https://www.roehampton.ac.uk/) and my
+B.Tech in Statistics from the [Federal University of Technology, Akure](https://www.futa.edu.ng/).
+
+In addition to my academic qualifications, I hold certifications such as the Google Cybersecurity Certificate and CompTIA Security+, which validate my technical skills and commitment to continuous learning.
+My goal is to stay ahead of cyber adversaries by researching emerging technologies, analyzing attack patterns, and fostering a proactive security culture within organizations.
+
+Beyond my technical expertise, I excel at teaching security to cross-functional teams, delivering comprehensive security awareness training, and translating complex cybersecurity concepts into actionable strategies for stakeholders. Whether defending critical infrastructure or enhancing incident response plans, I am dedicated to making the digital world a safer place.
+
+In my free time, I enjoy participating in capture-the-flag (CTF) competitions, exploring the latest cybersecurity trends, and mentoring aspiring professionals in the field. I believe that a strong defense starts with a knowledgeable and vigilant team, and I am always eager to contribute my skills and expertise to drive success in cybersecurity initiatives.
diff --git a/_pages/research.md b/_pages/research.md
index 9945ecb4248..e86d0858f2d 100644
--- a/_pages/research.md
+++ b/_pages/research.md
@@ -7,28 +7,20 @@ header:
   og_image: "research/ecdf.png"
 ---

-My academic research falls into two main areas: understanding the influence of
-geography on actor behavior before, during, and after civil conflict, and
-developing new tools to improve the study of institutions (both formal and
-informal) in peace and conflict. One strand of research in this first area
-explores how the territories that ethnic groups inhabit shape rebel group
-formation and condition their relationship with the state. My interest in
-geography also informs projects on active conflicts including the targeting of
-UN peacekeepers by insurgent groups, civilian victimization after rebel
-territorial conquest, and communal violence in fragile settings.
-
-My other main research agenda uses advanced methods to develop new measures of
-institutions. One project uses Bayesian item response theory to measure the
-strength of peace agreements as a latent variable and free researchers from
-post-treatment bias caused by using the duration of agreements as a proxy for
-their strength. In others, I apply unsupervised learning techniques to over a
-billion observations of product-level international trade data to measure
-economic interdependence and illicit economic exchange.
-
-In a new avenue of research, I leverage social media data to explore
-participation in extremist movements across multiple contexts, gaining insight
-into the early stages of radicalization.
+My MSc Cybersecurity research at the University of Roehampton, London, investigates OS hardening and vulnerability auditing techniques on Windows and Linux platforms using automated assessment and system security benchmarking. The study evaluates the effects of Centre for Internet Security (CIS) Benchmarks, applied with increasing granularity, using CIS-CAT Lite and the Microsoft Security Compliance Toolkit for Windows operating systems, and the same benchmarks using OpenSCAP and CIS-CAT Lite for Linux system analysis.
+The study used systematic implementation and testing across controlled virtual environments to examine compliance gains, security posture, performance impact, and operational effects. The research methodology encompasses foundational security assessments, progressive CIS Benchmark application (Levels 1 and 2), conformance validation, and quantitative cross-platform analysis.
+
+The results show significant security improvements across both platforms. For the Level 1 CIS Benchmarks, Ubuntu 20.04 achieved gains of +26% (from 59% to 85%) and +25% (from 60% to 85%) before and after hardening; larger increases were seen for the Level 2 CIS Benchmarks, with gains of +27% (from 51% to 78%) and +36% (from 40% to 76%) for CIS-CAT and OpenSCAP, respectively.
+
+The findings of this research stress the growing need for standard security evaluation systems in both personal and enterprise environments characterised by the coexistence of two or more operating systems. By applying the tools in an organised manner and critically analysing the results, the research showed how effective they are at highlighting inadequate security configurations, attack vectors, unaddressed compliance gaps, and potential security holes in both OSs.
+
+Key findings showed that automated benchmarking tools offer substantial benefits for maintaining stable and stronger security postures, such as the strong cross-platform support and compatibility of CIS-CAT Lite. The study develops and recommends best practices for tool integration, reporting, and remediation. OpenSCAP proved effective on Linux-based systems as a result of its native SCAP compliance, while Windows-specific coverage was achieved through a combination of CIS-CAT Lite and the security features already in place.
+
+The research contributes real value to cybersecurity by providing empirical evidence of tool effectiveness, by creating evaluation approaches, and by constructing policies and frameworks for sustained security monitoring. The experience demonstrated that substantial security improvements can be achieved when individuals and organisations apply these benchmarking tools in an incremental, phased manner, via measurable reductions in security risk exposure and enhanced conformance postures.
+ +[Screencast 1](https://onedrive.live.com/?viewid=48c9f1ae%2Dccb2%2D48f9%2Da4c6%2Daf0d6d99e8f3&login_hint=godswill08030444795%40gmail%2Ecom&id=%2Fpersonal%2F75b6705bbdcf6d5a%2FDocuments%2FAttachments%2FMSc%20Thesis%20review%20%2D%20Made%20with%20Clipchamp%5F1755857025383%2Emp4&parent=%2Fpersonal%2F75b6705bbdcf6d5a%2FDocuments%2FAttachments){: .btn--research} +[Screencast 2](https://onedrive.live.com/?viewid=48c9f1ae%2Dccb2%2D48f9%2Da4c6%2Daf0d6d99e8f3&login_hint=godswill08030444795%40gmail%2Ecom&id=%2Fpersonal%2F75b6705bbdcf6d5a%2FDocuments%2FWindows%20VM%20%2D%20Made%20with%20Clipchamp%5F1755853544062%2Emp4&parent=%2Fpersonal%2F75b6705bbdcf6d5a%2FDocuments){: .btn--research} {% include base_path %} diff --git a/_pages/software.md b/_pages/software.md deleted file mode 100644 index db965860c8b..00000000000 --- a/_pages/software.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -permalink: /software/ -title: "Software" -gallery: - - url: software/table.png - image_path: software/table.png - alt: "coefficient table generated by BayesPostEst" - title: "Coefficient table generated by BayesPostEst" - - url: software/coefplot.png - image_path: software/coefplot.png - alt: "coefficient plot generated by BayesPostEst" - title: "Coefficient plot generated by BayesPostEst" - - url: software/margeff.png - image_path: software/margeff.png - alt: "marginal effects plot generated by BayesPostEst" - title: "Marginal effects plot generated by BayesPostEst" -header: - og_image: "software/spatial_weighting.png" ---- - -As one of my two research agendas involves improving the tools we use to study peace and conflict, a good deal of my time is spent using statistical software. Below you'll find software for working with estimates from Bayesian models and some code that I've written to save time on tasks that I find myself doing over and over again. - -# BayesPostEst - -[![R build status](https://github.com/ShanaScogin/BayesPostEst/workflows/R-CMD-check/badge.svg)](https://github.com/ShanaScogin/BayesPostEst/actions) -[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/BayesPostEst)](https://CRAN.R-project.org/package=BayesPostEst) -[![Codecov test coverage](https://codecov.io/gh/ShanaScogin/BayesPostEst/branch/master/graph/badge.svg)](https://codecov.io/gh/ShanaScogin/BayesPostEst?branch=master) - -I am a developer of the [BayesPostEst](https://cran.r-project.org/package=BayesPostEst) R package for generating postestimation quantities of interest from Bayesian models. The package contains functions for producing regression tables, plotting predicted probabilities, calculating first differences, creating coefficient plots, and many other quantities. You can view the [Journal of Open Source Software](https://joss.theoj.org/) article for the package [here](https://doi.org/10.21105/joss.01722). 
-
-{% include gallery %}
-
-To install the latest release on CRAN:
-
-```r
-install.packages("BayesPostEst")
-```
-
-The latest [development version](https://github.com/ShanaScogin/BayesPostEst) on GitHub can be installed with:
-
-```r
-library(remotes)
-install_github("ShanaScogin/BayesPostEst")
-```
-
-You can try out the `mcmcCoefPlot` function from the package in the interactive R console below:
-
-# RWmisc
-
-[![R build status](https://github.com/jayrobwilliams/RWmisc/workflows/R-CMD-check/badge.svg)](https://github.com/jayrobwilliams/RWmisc/actions)
-[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/RWmisc)](https://CRAN.R-project.org/package=RWmisc)
-[![codecov](https://codecov.io/gh/jayrobwilliams/RWmisc/branch/master/graph/badge.svg)](https://codecov.io/gh/jayrobwilliams/RWmisc)
-
-I've collected convenience functions that I've written to address issues I frequently confront in my work into a personal R package called [RWmisc](https://CRAN.R-project.org/package=RWmisc). It includes functions for:
-
-- Managing multiple different projections for cross-national spatial data
-- Converting latitude-longitude data in archaic forms (degrees, minutes, seconds)
-- Correcting for overlapping polygons when aggregating raster data to polygons
-- My custom minimal ggplot2 theme
-
-![](/images/software/spatial_weighting.png)
-
-To install the latest release on CRAN:
-
-```r
-install.packages("RWmisc")
-```
-
-The latest [development version](https://github.com/jayrobwilliams/RWmisc) on GitHub can be installed with:
-
-```r
-library(remotes)
-install_github("jayrobwilliams/RWmisc")
-```
-
-# Other resources
-
-I also have a number of other software resources focused on making computation and academic life easier:
-
-- [The template](https://github.com/jayrobwilliams/JobMarket) I use for my academic job market materials
-  - Fill in school/position information in one file and it populates to all statements
-  - Generate summary statistics from teaching evaluations and integrate into statements
-  - Combine multiple teaching evaluations into a single portfolio document
-  - Do all of this programmatically with GNU Make!
-- [The template](https://github.com/jayrobwilliams/UNC-Dissertation-Template) I used for my dissertation
-  - This satisfied the formatting requirements at UNC in 2019
-  - Some tweaking likely required to use at another institution or in the future
-- [Scripts](https://github.com/jayrobwilliams/Teaching) that I use to save time on various teaching-related tasks like grading
-- [Functions](https://github.com/jayrobwilliams/ComputerVision) for extracting still frames from videos and information from images in Python using OpenCV
-- [Compiling OpenCV](/files/html/OpenCV_Install.html) from source for Anaconda virtual environments instead of Homebrew ones or system Python installations
diff --git a/_pages/softwares.md b/_pages/softwares.md
new file mode 100644
index 00000000000..7c47f905377
--- /dev/null
+++ b/_pages/softwares.md
@@ -0,0 +1,413 @@
+---
+permalink: /software/
+title: "Software"
+gallery:
+---
+Software is a set of instructions, data, or programs used to operate computers and execute specific tasks. In the world of technology research, documenting the tools, applications, and software employed is fundamental to the completeness and reproducibility of any study.
+### OS scan, audit and hardening
+This study entails installing, configuring, scanning, auditing, and hardening two popular operating systems, in four parts.
+The 1st part is the installation of OpenSCAP, the CIS-CAT Lite Assessor, and Lynis on an Ubuntu 20.04 workstation; the 2nd is the installation of the Microsoft Security Compliance Toolkit and the CIS-CAT Lite Assessor on Windows 10 Enterprise; the 3rd is the scanning and auditing of both OSs; and the 4th is the remediation/hardening of the vulnerabilities discovered by the scans, while simultaneously gathering system performance metrics (memory usage, disk I/O, and CPU load) pre-scan, during the scan, and post-scan as each security operation is conducted.
+### Phase 1: OS Audit and Hardening tools installation on Ubuntu 20.04
+#### [OpenSCAP](https://www.open-scap.org/) Installation
+OpenSCAP will be installed by building from source, as the standard package installation lacks important dependencies, workbench profiles, the compliance security guide, and a development environment. The installation steps are as follows.
+```bash
+## Update system and install basic tools
+sudo apt update && sudo apt install curl wget git vim -y
+## Install OpenSCAP dependencies
+sudo apt install cmake build-essential pkg-config \
+    libxml2-dev libxslt1-dev libpcre3-dev libcurl4-openssl-dev \
+    librpm-dev libbz2-dev libxmlsec1-dev libglib2.0-dev \
+    libacl1-dev libselinux1-dev libdbus-1-dev libpopt-dev \
+    python3-dev python3-pytest doxygen swig -y
+```
+```bash
+# Clone and build OpenSCAP from the repository
+git clone https://github.com/OpenSCAP/openscap.git
+cd openscap # Navigate to the project directory
+mkdir build # Create and enter build directory
+cd build
+cmake ../ # Generate build files with CMake
+make -j$(nproc) # Compile using all available CPU cores
+sudo make install # Install OpenSCAP
+```
+#### Locate and create symlink for oscap binary
+```bash
+# Find where OpenSCAP was installed (system-wide)
+sudo find /usr/local -name "oscap" -type f 2>/dev/null
+# Find oscap binary in build directory
+find ~/openscap/build -name "oscap" -type f 2>/dev/null
+# Create symlink to make oscap available system-wide
+sudo ln -s /home/$USER/openscap/build/utils/oscap /usr/local/bin/oscap
+oscap --version # Verify installation
+```
+#### [CIS Compliance](https://github.com/ComplianceAsCode/content) Benchmark and Security Guide installation for OpenSCAP
+After installing OpenSCAP, also install its security profiles from GitHub with:
+```bash
+# Download the latest SCAP Security Guide
+wget https://github.com/ComplianceAsCode/content/releases/download/v0.1.76/scap-security-guide-0.1.76.zip
+# Extract the archive
+unzip scap-security-guide-0.1.76.zip
+# Create directory for SCAP content
+sudo mkdir -p /usr/share/xml/scap/ssg/content/
+# Copy SCAP content files to system directory
+cd scap-security-guide-0.1.76
+sudo cp *.xml /usr/share/xml/scap/ssg/content/
+# Verify content installation
+sudo ls -la /usr/share/xml/scap/ssg/content/
+# Find Ubuntu-specific SCAP content
+sudo find /usr/share -name "*ubuntu*" | grep -i scap
+
+# Test Ubuntu 20.04 content availability
+sudo oscap info /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml 2>/dev/null || echo "Ubuntu 20.04 content not found"
+# List all available DataStream files
+ls -la /usr/share/xml/scap/ssg/content/ssg-*-ds.xml
+```
+```bash
+# Run SCAP evaluation with standard profile for Ubuntu 20.04
+sudo oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_standard \
+    --results results.xml \
+    --report report.html \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+# Alternative: Run evaluation for Ubuntu 22.04 (or choose your version if available)
+# sudo oscap xccdf eval \
+#     --profile xccdf_org.ssgproject.content_profile_standard \
+#     --results results-ubuntu2204.xml \
+#     --report report-ubuntu2204.html \
+#     /usr/share/xml/scap/ssg/content/ssg-ubuntu2204-ds.xml
+# Show all available profiles for Ubuntu 20.04
+sudo oscap info /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml | grep -A 50 "Profiles:"
+```
+#### Additional useful commands
+```bash
+# Generate remediation script
+sudo oscap xccdf generate fix \
+    --profile xccdf_org.ssgproject.content_profile_standard \
+    --output remediation-script.sh \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+# Check for specific compliance frameworks
+sudo oscap info /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml | grep -i "cis\|nist\|pci"
+# To view HTML report use: firefox report.html or your preferred browser
+```
+```bash
+# Troubleshoot: If content files are missing, check what was actually extracted
+ls -la scap-security-guide-0.1.76/
+# Verify oscap can access the files
+sudo oscap --version
+# Check file permissions
+ls -la /usr/share/xml/scap/ssg/content/
+# If evaluation fails, run with verbose output
+sudo oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_standard \
+    --results results.xml \
+    --report report.html \
+    --verbose INFO \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+```
+#### [CIS-CAT Lite](https://learn.cisecurity.org/cis-cat-lite) Audit Installation
+CIS-CAT Lite is an audit tool that incorporates the CIS Benchmarks and is useful for manual hardening.
+```bash
+# CIS-CAT Lite Installation and Setup
+sudo apt install -y openjdk-11-jdk # Install Java OpenJDK 11 (CIS-CAT dependency)
+java --version # Verify Java installation
+echo $JAVA_HOME # Check Java environment
+which java
+```
+#### Set up CIS-CAT directory structure
+```bash
+# Create CIS-CAT working directory
+mkdir -p ~/ciscat
+cd ~/ciscat
+# Extract to openscap build folder if preferred
+unzip ~/Downloads/'CIS-CAT Lite Assessor v4.55.0.zip' -d ~/openscap/build/
+
+# Navigate to extracted CIS-CAT directory
+cd ~/ciscat/Assessor-CLI/
+chmod +x ./Assessor-CLI.sh # Make the assessor executable
+./Assessor-CLI.sh # Run CIS-CAT with basic options
+# Run with HTML report output
+./Assessor-CLI.sh -b benchmarks/CIS_Ubuntu_Linux_20.04_LTS_Benchmark_v1.1.0-xccdf.xml -r ~/ciscat/reports/
+```
+#### Troubleshooting
+```bash
+# If Java version issues occur
+java --version
+# Check if CIS-CAT files are properly extracted
+find ~/ciscat -name "*.sh" -type f
+# If permission denied errors
+chmod -R +x ~/ciscat/Assessor-CLI/
+```
+#### [Lynis](https://github.com/CISOfy/Lynis) Audit Installation
+This is a deep scanning and audit tool that will be used to verify true/false positives and negatives.
+```bash
+sudo git clone https://github.com/CISOfy/lynis.git /opt/lynis # Clone the Lynis repository into /opt
+# Create a symlink to run Lynis from anywhere
+sudo ln -s /opt/lynis/lynis /usr/local/bin/lynis
+lynis --version # Verify installation
+sudo lynis audit system # Test run system audit
+```
+### Phase 2: Microsoft SCT and CIS-CAT installation on Windows 10 Enterprise
+#### [Microsoft Security Compliance Toolkit](https://www.microsoft.com/en-us/download/details.aspx?id=55319)
+Since the researcher is using a virtual machine, there is a need to install a compatible scripting environment, hence the latest PowerShell 7 is installed first.
+```powershell
+# Navigate to the root of C: drive
+Set-Location C:\
+# Check Winget version (ensure it is installed)
+winget --version
+# Install the latest PowerShell 7 using Winget (requires Admin rights)
+winget install --id Microsoft.Powershell --source winget
+```
+`Microsoft Security Compliance Toolkit installation`
+```powershell
+# Quick all-in-one Microsoft Security Compliance Toolkit (SCT) installer
+function Install-CompleteSCT {
+    $SCTPath = "C:\SecurityCompliance"
+    New-Item -Path $SCTPath -ItemType Directory -Force
+
+    Write-Host "📥 Downloading complete Microsoft SCT suite..." -ForegroundColor Cyan
+
+    # All major SCT components
+    $downloads = @{
+        "Windows10-21H2" = "https://download.microsoft.com/download/8/5/C/85C25433-A1B0-4FFA-9429-7E023E7DA8D8/Windows%2010%20Version%2021H2%20Security%20Baseline.zip"
+        "Windows10-2004" = "https://download.microsoft.com/download/8/5/C/85C25433-A1B0-4FFA-9429-7E023E7DA8D8/Windows%2010%20Version%202004%20and%20Windows%20Server%20Version%202004%20Security%20Baseline.zip"
+        "PolicyAnalyzer" = "https://download.microsoft.com/download/8/5/C/85C25433-A1B0-4FFA-9429-7E023E7DA8D8/PolicyAnalyzer.zip"
+        "EdgeBaseline" = "https://download.microsoft.com/download/8/5/C/85C25433-A1B0-4FFA-9429-7E023E7DA8D8/Microsoft%20Edge%20Security%20Baseline.zip"
+    }
+
+    foreach ($name in $downloads.Keys) {
+        $url = $downloads[$name]
+        $zipFile = "$SCTPath\$name.zip"
+        $extractPath = "$SCTPath\$name"
+
+        Write-Host "🔽 Downloading $name..." -ForegroundColor Yellow
+        Invoke-WebRequest -Uri $url -OutFile $zipFile -UseBasicParsing
+
+        Write-Host "📦 Extracting $name..." -ForegroundColor Yellow
+        Expand-Archive -Path $zipFile -DestinationPath $extractPath -Force
+        Remove-Item $zipFile -Force
+
+        Write-Host "✅ $name installed" -ForegroundColor Green
+    }
+
+    Write-Host "`n🎉 Complete SCT installation finished!" -ForegroundColor Green
+    Write-Host "📁 Location: $SCTPath" -ForegroundColor Cyan
+
+    Start-Process explorer.exe -ArgumentList $SCTPath
+}
+
+# Run complete installation
+Install-CompleteSCT
+```
+`Java 11 installation`
+```powershell
+# Install OpenJDK 11 (required for running CIS-CAT Lite Assessor)
+winget install --id Microsoft.OpenJDK.11 --source winget
+# Refresh the environment variable for the current session
+$env:Path = [System.Environment]::GetEnvironmentVariable("Path", "Machine") + ";" +
+            [System.Environment]::GetEnvironmentVariable("Path", "User")
+java -version # Verify the Java installation
+```
+#### [CIS-CAT Lite](https://learn.cisecurity.org/cis-cat-lite) Audit Installation for Windows 10
+```powershell
+New-Item -ItemType Directory -Path "$env:USERPROFILE\Desktop\MSc" -Force # Create a folder named MSc
+# Extract the CIS-CAT Lite zip file to the MSc folder
+Expand-Archive -Path "$env:USERPROFILE\Downloads\CIS-CAT Lite Assessor v4.54.1.zip" `
+    -DestinationPath "$env:USERPROFILE\Desktop\MSc" -Force
+Set-Location "$env:USERPROFILE\Desktop\MSc" # Navigate to CIS-CAT Assessor folder
+Get-ChildItem # View folder contents
+Set-Location ".\CIS-CAT Lite Assessor v4.54.1\Assessor" # Enter the Assessor directory
+# Confirm the contents (.jar files, config folders, etc.)
+Get-ChildItem
+# To scan with CIS benchmark
+java -jar Assessor-CLI.jar -b "benchmarks\CIS_Microsoft_Windows_10_Enterprise_Benchmark_v4.0.0-xccdf.xml" -p "Level 1 (L1) - Corporate/Enterprise Environment (general use)" -html -txt
+```
+### Phase 3: Scanning and Baseline Assessment of both OSs
+Click on [**THESIS SCRIPT**](https://godswill-ikot.github.io/research/Scripts/) to review, copy, and use the script used in my actual thesis research, titled "Operating System Hardening and Vulnerability Assessment Using CIS Benchmarking and System Security Tools on Windows and Linux".
+Note: The script covers every tool on this page and gathers all the system information needed to verify system performance pre-scan, during the scan, and post-scan.
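+A minimal sketch of how such performance snapshots can be captured at each stage is shown below (the script name, output directory, and sampling windows are illustrative assumptions, not part of the thesis script):
+```bash
+# Save a labelled snapshot of CPU load, memory usage, and disk I/O
+# Usage: bash perf-snapshot.sh pre-scan | during-scan | post-scan
+PHASE=${1:-pre-scan}
+OUT=~/perf-metrics/$PHASE-$(date +%Y%m%d_%H%M%S).txt
+mkdir -p ~/perf-metrics
+{
+  echo "=== $PHASE $(date) ==="
+  uptime          # CPU load averages
+  free -h         # Memory usage
+  vmstat 1 5      # CPU, memory, and I/O activity sampled over 5 seconds
+  iostat -dx 1 3  # Disk I/O statistics (requires the sysstat package)
+} > "$OUT"
+echo "Metrics saved to $OUT"
+```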
+#### Scanning and Assessment Using OpenSCAP on Ubuntu 20.04
+```bash
+# Create timestamped directories for better organisation
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+mkdir -p ~/openscap-results/baseline/$TIMESTAMP
+# CIS Level 1 Workstation
+oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_cis_level1_workstation \
+    --results ~/openscap-results/baseline/$TIMESTAMP/cis-l1-workstation-results.xml \
+    --report ~/openscap-results/baseline/$TIMESTAMP/cis-l1-workstation-report.html \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+# CIS Level 2 Workstation
+oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_cis_level2_workstation \
+    --results ~/openscap-results/baseline/$TIMESTAMP/cis-l2-workstation-results.xml \
+    --report ~/openscap-results/baseline/$TIMESTAMP/cis-l2-workstation-report.html \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+# Run with OVAL results and verbose output
+oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_cis_level1_workstation \
+    --results ~/openscap-results/baseline/cis-l1-workstation-detailed-results.xml \
+    --report ~/openscap-results/baseline/cis-l1-workstation-detailed-report.html \
+    --oval-results \
+    --verbose INFO \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+```
+#### Scanning and Assessment Using CIS-CAT on Ubuntu 20.04
+```bash
+# List available profiles first
+java -jar Assessor-CLI.jar \
+    -b "benchmarks/CIS_Ubuntu_Linux_20.04_LTS_Benchmark_v3.0.0-xccdf.xml" \
+    -D session.type=local \
+    -p
+chmod +x Assessor.sh # Give the script execute permission
+sudo ./Assessor.sh   # Run the script
+# OR
+# Run Level 1 Workstation assessment
+sudo java -jar Assessor-CLI.jar \
+    -D session.type=local \
+    -b "benchmarks/CIS_Ubuntu_Linux_20.04_LTS_Benchmark_v3.0.0-xccdf.xml" \
+    -p "Level 1 - Workstation" \
+    -html
+```
+#### View, Assess, and Generate Remediation Scripts for the Ubuntu Scan Results
+After the scans, it is important to view and assess the results to understand which areas of the OS need hardening, so that the security changes do not disrupt the OS's general functionality.
+```bash
+## Open HTML Reports for CIS-CAT (Graphical Interface)
+# Option 1: Firefox (if available)
+firefox *.html &
+
+# Option 2: Default system browser
+xdg-open *.html
+
+# Option 3: Chromium/Chrome
+chromium-browser *.html &
+google-chrome *.html &
+
+# Open HTML reports (if GUI available)
+# firefox ~/openscap-results/baseline/ubuntu-cis-l1-workstation-report.html &
+# firefox ~/openscap-results/baseline/ubuntu-cis-l2-workstation-report.html &
+
+# Generate remediation script for CIS Level 1 Workstation
+oscap xccdf generate fix \
+    --profile xccdf_org.ssgproject.content_profile_cis_level1_workstation \
+    --output ~/openscap-results/baseline/cis-l1-workstation-remediation.sh \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+# Generate remediation script for CIS Level 2 Workstation
+oscap xccdf generate fix \
+    --profile xccdf_org.ssgproject.content_profile_cis_level2_workstation \
+    --output ~/openscap-results/baseline/cis-l2-workstation-remediation.sh \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+# Make remediation scripts executable
+chmod +x ~/openscap-results/baseline/cis-l*-workstation-remediation.sh
+```
+### Phase 4: Remediation of vulnerabilities
+The [**REMEDIATION**](https://godswill-ikot.github.io/research/Remediation/) process is divided into two parts: full remediation and selective/manual remediation.
+#### Option A: Run Full Generated Script (CAUTION) for Ubuntu 20.04
+```bash
+# BACKUP FIRST - This will make system-wide changes
+echo "WARNING: This will make significant system changes!"
+echo "Press Ctrl+C to cancel, or Enter to continue..."
+read
+
+# Create restoration point
+sudo cp /etc/passwd /etc/passwd.pre-remediation
+sudo cp /etc/shadow /etc/shadow.pre-remediation
+sudo cp -r /etc/ssh /etc/ssh.pre-remediation
+
+# Run the full remediation
+echo "Running CIS Level 1 remediation..."
+sudo ./cis-l1-workstation-remediation.sh 2>&1 | tee remediation-log.txt
+```
+#### Option B: Run Selective Remediation (RECOMMENDED) for Ubuntu 20.04
+```bash
+# Run the custom selective remediation
+echo "Running selective remediation..."
+sudo ./custom-remediation.sh 2>&1 | tee selective-remediation-log.txt
+# OR
+# Extract specific remediation commands and run them individually
+
+# Fix file permissions
+echo "=== Fixing file permissions ==="
+sudo chmod 644 /etc/passwd
+sudo chmod 600 /etc/shadow
+sudo chmod 600 /etc/gshadow
+sudo chmod 644 /etc/group
+
+# Configure SSH (if needed)
+sudo sed -i 's/#PermitRootLogin yes/PermitRootLogin no/' /etc/ssh/sshd_config
+sudo sed -i 's/#PasswordAuthentication yes/PasswordAuthentication no/' /etc/ssh/sshd_config
+sudo systemctl restart ssh
+
+# Set password policies
+sudo apt install libpam-pwquality -y
+sudo sed -i 's/# minlen = 8/minlen = 14/' /etc/security/pwquality.conf
+sudo sed -i 's/# dcredit = 0/dcredit = -1/' /etc/security/pwquality.conf
+sudo sed -i 's/# ucredit = 0/ucredit = -1/' /etc/security/pwquality.conf
+sudo sed -i 's/# lcredit = 0/lcredit = -1/' /etc/security/pwquality.conf
+sudo sed -i 's/# ocredit = 0/ocredit = -1/' /etc/security/pwquality.conf
+
+# Configure system auditing
+echo "=== Installing and configuring audit daemon ==="
+sudo apt install -y auditd audispd-plugins
+sudo systemctl enable auditd
+sudo systemctl start auditd
+
+# Configure login banner
+echo "=== Setting up login banner ==="
+sudo tee /etc/issue << 'EOF'
+WARNING: Unauthorized access to this system is prohibited.
+All connections are monitored and recorded.
+EOF
+
+sudo tee /etc/issue.net << 'EOF'
+WARNING: Unauthorized access to this system is prohibited.
+All connections are monitored and recorded.
+EOF
+```
+#### Verify Remediation Results for Ubuntu 20.04
+```bash
+# Re-run the assessment to check improvements
+mkdir -p ~/openscap-results/post-remediation
+
+# Run post-remediation assessment
+oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_cis_level1_workstation \
+    --results ~/openscap-results/post-remediation/cis-l1-post-remediation-results.xml \
+    --report ~/openscap-results/post-remediation/cis-l1-post-remediation-report.html \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+
+# Compare before and after
+```
+#### Rollback Procedure (If Needed) for Ubuntu 20.04
+```bash
+# Create rollback script
+cat > rollback-remediation.sh << 'EOF'
+#!/bin/bash
+echo "Rolling back remediation changes..."
+
+# Restore from backup
+BACKUP_DIR="/root/openscap-backup/$(ls -1 /root/openscap-backup/ | tail -1)"
+if [ -d "$BACKUP_DIR" ]; then
+    cp -r $BACKUP_DIR/ssh /etc/
+    cp $BACKUP_DIR/login.defs /etc/
+    systemctl restart ssh
+    echo "Rollback completed from: $BACKUP_DIR"
+else
+    echo "No backup directory found!"
+fi
+EOF
+
+chmod +x rollback-remediation.sh
+```
+#### Monitoring and Maintenance for Ubuntu 20.04
+```bash
+# Set up regular compliance checking (write the cron job as root)
+sudo tee /etc/cron.weekly/openscap-check > /dev/null << 'EOF'
+#!/bin/bash
+/usr/local/bin/oscap xccdf eval \
+    --profile xccdf_org.ssgproject.content_profile_cis_level1_workstation \
+    --results /var/log/openscap/weekly-$(date +%Y%m%d)-results.xml \
+    --report /var/log/openscap/weekly-$(date +%Y%m%d)-report.html \
+    /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
+EOF
+
+sudo mkdir -p /var/log/openscap
+sudo chmod +x /etc/cron.weekly/openscap-check
+```
+
diff --git a/_pages/teaching.md b/_pages/teaching.md
index 603c56b840f..9964f220405 100644
--- a/_pages/teaching.md
+++ b/_pages/teaching.md
@@ -1,6 +1,6 @@
 ---
-permalink: /teaching/
-title: "Teaching"
+#permalink: /teaching/
+#title: "Teaching"
 ---

 Research plays a central role in my teaching as students improve their
diff --git a/_posts/2022-03-31-so-it-goes.md b/_posts/2022-03-31-so-it-goes.md
index 08710da583e..572ed86f998 100644
--- a/_posts/2022-03-31-so-it-goes.md
+++ b/_posts/2022-03-31-so-it-goes.md
@@ -7,15 +7,19 @@ output:
 knit: (function(inputFile, encoding) { rmarkdown::render(inputFile, encoding = encoding, output_dir = "../_posts") })
 date: 2022-03-31
-permalink: /posts/2022/03/so-it-goes
+permalink: /posts/2025/10/all-about-zabbix-for-website-monitoring
 excerpt_separator:
 toc: false
 header:
   og_image: "posts/so-it-goes/date_hist-1.png"
 tags:
   - career
-  - data-science
-  - visualization
+  - cybersecurity
+  - zabbix
+  - apache
+  - mysql
+  - php
+  - wordpress
 ---

When I was applying to graduate school and asking for letters of
diff --git a/_posts/2022-07-05-insufficient-data.md b/_posts/2022-07-05-insufficient-data.md
index dc081ef3cf2..f128d96cf6e 100644
--- a/_posts/2022-07-05-insufficient-data.md
+++ b/_posts/2022-07-05-insufficient-data.md
@@ -1,76 +1,200 @@
 ---
-title: 'There is as Yet Insufficient Data for a Meaningful Answer'
-date: 2022-07-05
-permalink: /posts/2022/07/insufficient-data
+title: 'Zabbix and web monitoring'
+date: 2025-01-15
+permalink: /posts/2025/01/zabbix-for-monitoring-a-website
 excerpt_separator:
 toc: true
 tags:
   - career
-  - data-science
+  - cybersecurity
+  - zabbix
 ---
-
-Since taking a job as a data scientist three months ago, I've spoken with multiple political science PhD students who are
interested in potentially making the same transition. This post synthesizes what I've said in those conversations with what I've learned in my first three months on the job, and I hope it will be helpful to anyone in the same position I was six months ago. - -As I mentioned in my [previous post](/posts/2022/03/so-it-goes), I'm drawing inferences from an *n* of one, so take anything I say with a hefty grain of salt.[^time] While I'm structuring this post largely as pieces of advice, keep in mind that these were things that worked for me, and may not generalize.[^negotiation] - -[^time]: Three months is also [far too short a time](https://archive.org/details/Science_Fiction_Quarterly_New_Series_v04n05_1956-11_slpn/page/n5/mode/2up?view=theater) to reach a definitive conclusion on this topic. - -[^negotiation]: Talking to other data scientists with similar backgrounds, which I discuss [below](#things-to-do), was useful because it gave me information and context that I was able to draw on when negotiating salary. However, an extensive [body](https://www.newyorker.com/science/maria-konnikova/lean-out-the-dangers-for-women-who-negotiate) [of](https://hbr.org/2014/06/why-women-dont-negotiate-their-job-offers) [research](https://www.npr.org/2007/08/06/12529237/for-women-pay-negotiations-can-bear-social-cost) finds that women are penalized for negotiating where men are rewarded for it. This is just one reminder of the fact that something that I found helpful may be less useful for you. - -# Differences from the academic job market - -Some important differences between the academic and nonacademic job markets that are useful to consider at the start: - -- Timelines are faster than faculty searches, but they are far less consistent. One process took almost three months, while another took less than three weeks. -- Not a single employer asked for letters of recommendation. One contacted references. -- Who you talk to varies greatly. For some positions my first contact was an HR phone screen, for others it was a 30 minute initial interview with the hiring manager. -- Performance tasks, otherwise known as coding assignments (or, more accurately, unpaid work), are common. These are just a fact of life for data science jobs. They varied from straightforward problem sets to research design memos, but not every job I interviewed for required them. -- There will probably be a technical interview. As these were not software engineering jobs, most of the ones I encountered tried to assess whether you know the basics of analyzing data in your language of choice and to get some insight into your problem solving approaches. -- Job talks are much less common, but not unheard of. Only two of the positions I interviewed for required a technical presentation, and unlike in academia, there is absolutely zero stigma against presenting coauthored work. -- Based on some very informal reckoning, automated HR rejection emails seem to be about as common for nonacademic jobs as academic ones.[^rejections] When they do come, these emails are much faster than in academia: days or weeks instead of months. -- Get ready for a new world of terminology and titles. In the same way that the assistant $\rightarrow$ associate $\rightarrow$ full professor progression baffles many outside of academia, I felt very lost upon encountering ads for senior, principal, and lead data scientists, and especially so when I applied to one for a data science technical adviser. 
-- Similarly, get ready to navigate the variety of different jobs that can fall under the umbrella of data scientist. Does a job list SQL, Tableau, and Excel as the most important technical skills? That's probably more of a data analyst position. TensorFlow, Dask, and C++? That's likely more of machine learning engineer job. If you're anything like me, you want to aim for the middle ground between these two. - -[^rejections]: This was a pleasant surprise for me, as I still have vivid memories of sending résumé after résumé out into the void as a fresh poli sci BA in 2012 and almost never hearing back. - -# The nonacademic résumé - -Probably the biggest transition when starting to apply for data science jobs was the shift from an academic CV to a nonacademic résumé. A CV lists functionally every major accomplishment you've achieved in your time in the field, while a résumé is highly targeted for a specific position. When applying to academic jobs, I wrote a (semi) customized cover letter for every job, and then included the relevant version of my CV (conflict, methods, or teaching). Each of these CVs contained the same information, just in a different order. In contrast, I significantly edited the skills section of most résumés I sent out based on the job listing. The [WashU career center](https://students.wustl.edu/career-center) has a [fantastic handout](https://students.wustl.edu/wp-content/uploads/2021/02/Resumes-and-CVs-2021-Final-1.pdf) on differences between the two documents and how to adapt a CV into a résumé that I drew on heavily in this process. - -In my opinion, the conventional wisdom that a résumé can only ever be one page is an overcorrection from the never-ending academic CV. The résumé I used to apply for jobs was two pages: the first included work experience, education, and a list of technical skills, while the second was project-oriented, and covered two publications, a couple of blog posts, a Shiny dashboard, teaching materials for the grad stats lab I taught. You definitely want to include links here, not just to the final product, but also the code behind it where relevant (replication materials for publications, git repos for smaller projects). This is an excellent opportunity to showcase work that uses data science skills to show something interesting, but wouldn't be considered novel enough for publication in an academic journal. Here are some other points that may be helpful when writing a résumé: - -- No one is likely to care that you wrote an undergraduate thesis or received a masters in passing (I did both, neither are on my résumé). An important exception to the latter point applies if you will be leaving your program without finishing your PhD; definitely list an in-passing masters in this case. Similarly, if you received a masters in a separate (more technical) program during your PhD, e.g., statistics or data science, be sure to list it as well. -- Social sciences can be a bit out of left field for data science hiring managers, so my résumé did include a "Concentrations: quantitative methodology and international relations" sub-bullet under my PhD in my education section. -- Paid research assistant jobs you had in grad school absolutely count as work experience and should be listed separately from your research and teaching if relevant to the types of jobs you're applying for. 
I listed my jobs ensuring the reproducibility of quantitative results for academic journals and supporting users of university high performance computing resources as there's a very short line between both of those job descriptions and many common data science tasks. -- If a job ad lists a skill and you have that skill, put it on your résumé, even if it's not one of your strongest skills. Your résumé will almost certainly be fed through an [applicant tracking system](https://en.wikipedia.org/wiki/Applicant_tracking_system), and the more matches the system finds, the higher the chance your résumé will end up in front of human eyes. -- I would take this a step further and do this in your cover letter as well. Does a job ad list a "solid understanding of relevant theories in machine learning, statistics, and probability theory" in the requirements? Then you'd better be prepared to talk about how you apply machine learning, statistics, and probability in your work. Does this feel a little like undergraduates trying to avoid plagiarism detection software by changing a few words here and there? Yes, but it's how hiring happens these days. - -# Things to do - -Below is a list of non-résumé-related things I did to prepare for and during my nonacademic job search that I found helpful: - -- As someone who (hubristically) deleted theirs the second year of grad school, it pains me to say that the most important thing you can do here is get yourself a LinkedIn. Get it looking as professional as your academic website. The first thing is to set the headline directly below your name to the type of job you're looking for. Want to be a research manager? List yourself as one and then talk about all the research assistants you coordinated. You'll have to do some reframing and shortening, but you can largely transfer over content from your academic website. I added publications and blog posts to the publications and projects section at the bottom of my profile, and I also added them as media items under my postdoc and PhD experiences where appropriate. Add a link or two with high quality preview images to the featured section at the top of your page. -- If you're applying for jobs now and you've taught a quantitative methods course at any point, get ready to talk about this. Every single interview asked me about a time where I had to explain a technical concept or project to a nontechnical audience, and teaching quantitative methods is nothing but that, multiple times a week, for an entire semester. Teaching statistics and programming is hard, so you'll also have lots of anecdotes ready when the interviewer asks a followup question about a time where you had to change your approach midway through a project. If you haven't taught quantitative methods yet and you're not already applying for jobs, do so if at all possible. -- Use your resources. I was fortunate enough to do my postdoc at an institution with an excellent career center that had multiple staff members with experience helping PhD students and postdocs get nonacademic jobs. However, even if your career center is less prepared to help you get a nonacademic job, lots of career centers have publicly available [online resources](https://students.wustl.edu/graduate-student-postdoc-career-resources) that can be very helpful. -- Use your networks. I talked with *many* people who work in data science and do not have degrees in computer science or statistics. 
This included two people from my undergraduate institution (one PhD in psychology, one in physics), multiple political science PhDs I met through Twitter and LinkedIn, and people who did data science masters and nonacademic data science bootcamps. Their experience and advice were invaluable for me in my job search process. -- Research salaries in the field you're applying to. You can get a broad sense of this through sites like Glassdoor, but ask the people I mentioned above about their starting salaries as well. They likely came from a similar background to you, and this information can be very useful when negotiating salary. You don't want to undersell yourself when an interviewer asks you your salary range. - -# Software skills - -Social science PhD programs are good at teaching research design, formal modeling, and statistical methodology. They spend far less time on what I'll call more supporting technical skills. Here are some suggestions in this domain based on my observations so far: - -- Don't try to learn everything there is to know about a cloud computing architecture. There are too many, and every company's implementation is subtly different. At my job, we use AWS, GCP, and Azure for various tasks, so learning one inside and out won't give you a huge advantage when applying. If you can generate SSH keys and copy them to a remote host, you're most of the way there. -- Learn some SQL, but don't worry about learning how to administer a database. If you can write queries that join multiple tables together and summarize by multiple groups, you're probably good. If you know the standard libraries for connecting to and querying a SQL database in Python and/or R, that's great. Again, depending on the individual database solution your job uses, you may have to use a very specific package to access it from your data science language. Mode has a free [tutorial](https://mode.com/sql-tutorial/introduction-to-sql) with an interactive interface that lets you write and run SQL queries in your browser that I found very helpful. -- Get some experience with shell scripting. I was first exposed to shell scripts because you had to write one to submit jobs on our university cluster in grad school. Data science often involves many moving parts, and being able to use some shell scripting to glue them all together can be incredibly useful. Software Carpentry has a pretty solid introductory [lesson](https://swcarpentry.github.io/shell-novice). -- I use git daily. While I rarely used git to manage collaboration with coauthors in academia, I used it to version control all of my solo-authored projects, and that provided a solid-enough background for my current level of usage. -- Automation is another important skill in the data science toolbox. Sometimes you'll have fancy GUI-based tools to set things up to run automatically, but other times it's faster and simpler to use a [cron jobs](https://en.wikipedia.org/wiki/Cron). I taught myself the basics of cron to keep the stats in my post [visualizing police militarization via the transfer of surplus armored vehicles to police departments](/posts/2020/06/visualizing-militarization/) automatically updated. - -# The social science PhD comparative advantage - -So far this post has mainly been oriented around a list of discrete things you can do to (potentially) improve your odds of securing a data science job as someone with a social science PhD. 
This last section reflects a perspective I developed throughout my job search process as I participated in more and more interviews, and I hope, will serve as a source of motivation for anyone pursuing a similar career transition.
-
-The vast majority of quantitative social science PhDs (myself very much included) are never going to be machine learning engineers who run neural networks all day long. Instead, we're going to be working with those engineers, running our own analyses (which might include some deep learning models, but plenty of other types of models as well), and also working with with less-technical stakeholders.
-
-Based on conversations with other data scientists and my experiences as a data scientist thus far, a large part of a data scientist's job is communicating the value of the work you and your more-technical team members have done to people with less technical training. Even if they have a strong background in statistics or research design more generally, they're still likely to be less familiar with your specific area of expertise. Communicating effectively in this situation requires distilling large amounts of information, drawing conclusions based on data, and then summarizing what you did, why you did it, and what you learned from doing it. To me, that sounds exactly like what social science PhD programs train their students to do.[^concrete]
-
-[^concrete]: To make this even more concrete: being able to communicate effectively with software engineers means that they help make your models more efficient with less work from you; being able to communicate with stakeholders means that you are more likely to get recognition for the work you did.
+## Zabbix for web monitoring portfolio 3 discussion
+Zabbix is open-source monitoring software built for monitoring various IT infrastructure components such as websites, servers, network devices, applications, and services. It provides a comprehensive suite of features for real-time monitoring, alerting, and visualization of system health and performance. Zabbix is particularly popular in enterprise environments due to its scalability, flexibility, and open-source nature, making it highly customizable and cost-effective.
+
+#### Key Features of Zabbix
+- Real-time Monitoring: for network devices (switches), servers (physical and virtual servers, etc.), applications (web applications, databases, email systems, etc.), cloud environments (AWS, Azure, Google Cloud, etc.), and storage devices (disk usage, file systems, etc.).
+- Data Collection: consists of agent-based monitoring (Zabbix agents are installed on monitored machines) and agentless monitoring (for devices or systems where agent installation is not possible or desirable, Zabbix can collect data via protocols like SNMP (Simple Network Management Protocol), IPMI (Intelligent Platform Management Interface), or SSH (Secure Shell)).
+- Visualization: provides dashboards with customizable, real-time visual representations of the system's health, graphs, and charts, as well as trend analysis of performance metrics such as CPU usage, memory usage, and network throughput.
+- #### Other features
+- Scalability, Auto-discovery, Security, Alerts and Notifications
+
+### Zabbix portfolio network segmentation (create a VM sandbox)
+This is a continuation and extension of my [previous post](/posts/2023/03/nest-map) on 'A Basic Sandboxing Portfolio'; here we consider an extended network segmentation for this portfolio study.
+
+#### Create an IP table for each machine on the network
+
+| Device | Role | IP Address | Subnet Mask |
+|--------------|--------------|--------------|--------------|
+| Desktop 1 VM | Management and deployment | 192.168.200.9 | 255.255.255.0 |
+| Desktop 2 as WordPress server VM | Management and deployment | 192.168.200.5 | 255.255.255.0 |
+| Gateway Router VM (enp0s3) | Subnet 01 - Access to internet, Gateway and Desktop | 192.168.126.1 | 255.255.255.0 |
+| Gateway Router VM (enp0s8) | Subnet 02 - Access to internet and Gateway for Desktop 1 and Desktop WordPress server | 192.168.200.1 | 255.255.255.0 |
+| Gateway Router VM (enp0s9) | Internet Access to Ubuntu Gateway | 10.0.2.15 | 255.255.255.0 |
+| Gateway Router VM (enp0s10) | Subnet 03 - Access to internet, Gateway and Desktop | 192.168.26.1 | 255.255.255.0 |
+| Application Server VM | Server (Bitnami-Opencart) | 192.168.26.2 | 255.255.255.0 |
+
+- #### Desktop Configuration:
+  - #### Desktop 1 for Management and deployment
+Go to Settings, click NETWORK, and set ADAPTER 1 to INTERNAL NETWORK; then click OK and click START to boot up the machine in the VM.
+
+After starting up the machine, go to Settings, click on WIRED SETTINGS, then click on IDENTITY to set up the MAC Address (08:00:27:97:75:31 (enp0s3)). Then click IPV4, set it to Manual, and set the ADDRESS, NETMASK, GATEWAY, and DNS as follows: ADDRESS: 192.168.200.9, NETMASK: 255.255.255.0, GATEWAY: 192.168.200.1, and DNS: 8.8.8.8, 8.8.4.4, 1.1.1.1.
+Then click APPLY, and DISCONNECT and RE-CONNECT the WIRED CONNECTION.
+
+Open a command line terminal using "Ctrl + Alt + T" and type "ip a" to see if the IP address is set. If the IP address is set, continue; otherwise redo the above step and restart the Ubuntu desktop machine.
+
+  - #### Desktop 2 for WORDPRESS management, installation and deployment
+Go to Settings, click NETWORK, and set ADAPTER 1 to INTERNAL NETWORK; then click OK and click START to boot up the machine in the VM.
+
+After starting up the machine, go to Settings, click on WIRED SETTINGS, then click on IDENTITY to set up the MAC Address (08:00:27:97:75:31 (enp0s3)). Then click IPV4, set it to Manual, and set the ADDRESS, NETMASK, GATEWAY, and DNS as follows: ADDRESS: 192.168.200.5, NETMASK: 255.255.255.0, GATEWAY: 192.168.200.1, and DNS: 8.8.8.8, 1.1.1.1.
+Then click APPLY, and DISCONNECT and RE-CONNECT the WIRED CONNECTION.
+
+Open a command line terminal using "Ctrl + Alt + T" and type "ip a" to see if the IP address is set. If the IP address is set, continue; otherwise redo the above step and restart the Ubuntu desktop machine.
+
+- #### **Command codes for installation of ZABBIX-AGENT and WORDPRESS**
+
+

+1. sudo apt update
+2. sudo apt install -y zabbix-agent
+Open the Zabbix agent configuration file:
+3. sudo nano /etc/zabbix/zabbix_agentd.conf
+Search manually for Server, ServerActive and Hostname (insert the IP address of the Zabbix server that will monitor this host)
+4. Server=192.168.200.2
+5. ServerActive=192.168.200.2
+6. Hostname=Zabbix server [Zabbix server]
+7. sudo systemctl enable zabbix-agent [Enable from system start-up]
+8. sudo systemctl start zabbix-agent [To start agent installed]
+9. sudo systemctl status zabbix-agent [To check agent availability if active or not]
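+
+Optionally, you can confirm the server can actually reach each agent by querying it from the Zabbix server host with the `zabbix_get` utility (an extra check beyond the steps above, assuming the default agent port 10050 is open; the IPs are the two agent machines from the table above):
+
+``` bash
+sudo apt install -y zabbix-get                # query utility, installed on the Zabbix server
+zabbix_get -s 192.168.200.9 -k agent.ping     # Desktop 1; a reply of "1" means the agent is reachable
+zabbix_get -s 192.168.200.5 -k agent.ping     # WordPress server
+```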
+ +

+
+
+- #### Prerequisites and dependencies for WordPress installation
+To install WordPress you need Apache2, MySQL, and PHP
+ +- #### Steps to install Apache +
+

+1. sudo apt update
+2. sudo apt install apache2 -y [Install the Apache server]
+3. sudo systemctl restart apache2 [To restart the server]
+4. sudo systemctl status apache2 [To check if it is active]
+5. sudo systemctl enable apache2 [To start at boot-up]
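+
+As a quick sanity check (an optional step beyond the list above), confirm Apache is running and serving the default page locally:
+
+``` bash
+systemctl is-active apache2                                # should print "active"
+curl -s -o /dev/null -w "%{http_code}\n" http://localhost  # should print 200
+```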
+ +

+
+
+- #### Steps to install MySQL server for database management
+
+

+1. sudo apt update
+2. sudo apt install mysql-server
+3. sudo systemctl restart mysql [To restart MySQL]
+4. sudo systemctl start mysql [To start the installed MySQL service]
+5. sudo systemctl enable mysql [To start MySQL at system boot-up]
+6. sudo systemctl status mysql [To check whether MySQL is active]
+7. sudo mysql_secure_installation [Answer yes to all questions in this section]
+8. sudo mysql -u root -p [To enter the MySQL database interface]
+9. CREATE DATABASE wordpress; [In the MySQL environment, run this statement to create the WordPress database]
+10. CREATE USER 'wordpressuser'@'localhost' IDENTIFIED BY 'your_password'; [Create a user named 'wordpressuser' and set your password]
+11. GRANT ALL PRIVILEGES ON wordpress.* TO 'wordpressuser'@'localhost'; [Grant privileges and access]
+12. FLUSH PRIVILEGES; [Make all changes take effect]
+13. EXIT; [To quit the MySQL shell]
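+
+To verify the database and user were created correctly (an optional check), log in as the new user; after entering the password you set, the `wordpress` database should appear in the output:
+
+``` bash
+mysql -u wordpressuser -p -e "SHOW DATABASES;"
+```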
+

+
+
+- #### Steps to install PHP and its required extensions
+
+

+1. sudo apt update
+2. sudo apt install php libapache2-mod-php php-mysql php-cli php-xml php-curl php-json php-mbstring php-zip php-gd php-intl
+[Install PHP and its other dependencies]
+3. sudo apt install php-bcmath php-soap php-ldap php-imagick php-xsl php-opcache php-sqlite3 php-memcached php-redis
+4. sudo apt install php-fpm [Optional: PHP-FPM, only needed with Nginx]
+5. php -v [To verify the installation]
+6. sudo a2enmod php8.1 [Configure Apache to work with PHP; the module name is versioned, so replace 8.1 with your installed PHP version]
+7. sudo systemctl restart apache2 [Restart Apache]
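+
+To confirm Apache is actually executing PHP, one common optional test is a throwaway `info.php` page; remove it afterwards because it exposes server details:
+
+``` bash
+echo '<?php phpinfo();' | sudo tee /var/www/html/info.php   # create the test page
+# browse to http://<server-ip>/info.php, then clean up:
+sudo rm /var/www/html/info.php
+```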
+ +

+
+
+- #### Steps to download and install WordPress
+
+

+1. cd /var/www/html
+[Move into the html directory to download WordPress]
+2. sudo wget https://wordpress.org/latest.tar.gz
+[Download the latest WordPress archive]
+3. sudo tar -xvzf latest.tar.gz
+[Extract the downloaded archive]
+4. sudo rm latest.tar.gz [Remove the archive file]
+5. cd wordpress [Move into the wordpress directory to configure it]
+6. sudo cp wp-config-sample.php wp-config.php
+[Copy the sample configuration file to create wp-config.php]
+7. sudo nano wp-config.php [Edit the wp-config.php file]
+Find and modify the following lines in the wp-config.php file to match the database settings you created earlier
+8. define( 'DB_NAME', 'wordpress' ); define( 'DB_USER', 'wordpressuser' ); define( 'DB_PASSWORD', 'your_password' ); define( 'DB_HOST', 'localhost' );
+9. Save and exit the file (Ctrl + O, Enter, Ctrl + X).
+10. sudo chown -R www-data:www-data /var/www/html/wordpress [Change ownership of WordPress files to the Apache user (www-data)]
+11. sudo find /var/www/html/wordpress -type d -exec chmod 755 {} \; [Set 755 permissions on the WordPress directories]
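+
+Step 11 sets directory permissions only; if you also want the usual 644 mode on regular files, a companion command in the same pattern (an optional extra, not required by the steps above) is:
+
+``` bash
+sudo find /var/www/html/wordpress -type f -exec chmod 644 {} \;   # standard file permissions
+```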
+12. sudo nano /etc/apache2/sites-available/wordpress.conf [Create a new Apache virtual host configuration file for WordPress]
+13. Add the following configuration (replace your_domain_or_IP with your domain or server IP):
+
+<VirtualHost *:80>
+    ServerAdmin webmaster@localhost
+    DocumentRoot /var/www/html/wordpress
+    ServerName your_domain_or_IP
+
+    <Directory /var/www/html/wordpress>
+        AllowOverride All
+        Require all granted
+    </Directory>
+
+    ErrorLog ${APACHE_LOG_DIR}/error.log
+    CustomLog ${APACHE_LOG_DIR}/access.log combined
+</VirtualHost>
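+
+Before enabling the site, it is worth validating the configuration syntax (an optional check):
+
+``` bash
+sudo apache2ctl configtest   # should print "Syntax OK"
+```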
+
+14. sudo a2ensite wordpress.conf
+[Enable the WordPress site]
+sudo a2dissite 000-default.conf
+[Disable the default site]
+15. sudo a2enmod rewrite
+[Enable the Apache rewrite module]
+16. sudo systemctl restart apache2 [Restart Apache]
+
+Complete the WordPress installation via the browser (front end)
+17. http://your_domain_or_IP [Use your IP address in your browser, e.g. http://192.168.200.5]
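+
+If the browser cannot reach the site, a quick optional check from Desktop 1 (assuming the WordPress server address from the table above) is:
+
+``` bash
+curl -I http://192.168.200.5   # an HTTP response header block confirms Apache is reachable
+```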
+ +

+
- #### Gateway(Router) Configuration:
+
+  Go to Settings, then click NETWORK and set ADAPTER 1 for (desktop 1) to INTERNAL NETWORK, ADAPTER 2 for (desktop 2) to INTERNAL NETWORK, ADAPTER 3 to NAT (Internet Access), and ADAPTER 4 to INTERNAL NETWORK for the 192.168.200.0/24 network.
+Then click OK and click START to boot up the machine on the VM
+After starting up the machine, use: student@router:~$ **ip a** (to view all IPs)
+Set the network as follows (see the [previous post](/posts/2023/03/nest-map) for the exact indentation):
+
+

+student@router:~$ sudo nano /etc/netplan/00-installer-config.yaml
+1.
+ network:
+   version: 2
+   ethernets:
+     enp0s3:
+       dhcp4: no
+       addresses: [192.168.126.1/24]   # Subnet 01
+     enp0s8:
+       dhcp4: no
+       addresses: [192.168.200.1/24]   # Subnet 02 (Desktop 1 and the WordPress server)
+     enp0s9:
+       dhcp4: true                     # NAT interface, address assigned by DHCP
+     enp0s10:
+       dhcp4: no
+       addresses: [192.168.26.1/24]    # Subnet 03 (application server)
+ +2. sudo netplan apply [To apply changes]
+3. student@router:~$ ip a [To view all IP settings]
+4. sudo reboot
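+
+After the reboot, a compact optional way to confirm each interface came up with the intended address is the brief output of `ip`:
+
+``` bash
+ip -br a   # one line per interface: name, state, addresses
+```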
+ +

+
+**Note**: For the final configuration of the gateway's routing and IP tables, follow this [LINK](https://moodle.roehampton.ac.uk/pluginfile.php/4873277/mod_resource/content/4/Setting%20Up%20a%20Ubuntu%20Gateway%20Router_v02.pdf)
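+
+For context, a gateway like this generally needs IP forwarding and NAT enabled on the router. The linked guide is the authoritative reference; a minimal sketch of the essentials, assuming enp0s9 is the NAT-facing interface as in the table above, looks like this:
+
+``` bash
+sudo sysctl -w net.ipv4.ip_forward=1                          # enable forwarding for this session
+echo 'net.ipv4.ip_forward=1' | sudo tee -a /etc/sysctl.conf   # persist across reboots
+sudo iptables -t nat -A POSTROUTING -o enp0s9 -j MASQUERADE   # NAT internal subnets out the internet-facing interface
+```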
- #### Bitnami Opencart Server Configuration:
+
diff --git a/_posts/2023-03-04-nest-map.md b/_posts/2023-03-04-nest-map.md
index 04aac46d2c0..1f5e5a14bb4 100644
--- a/_posts/2023-03-04-nest-map.md
+++ b/_posts/2023-03-04-nest-map.md
@@ -1,12 +1,12 @@
 ---
-title: Presenting results from an arbitrary number of models
+title: Basic sandbox network with three (3) virtual machines
 output:
   md_document:
     variant: gfm+footnotes
     preserve_yaml: TRUE
 knit: (function(inputFile, encoding) { rmarkdown::render(inputFile, encoding = encoding, output_dir = "../_posts") })
-date: 2023-03-04
+date: 2024-12-04
 permalink: /posts/2023/03/nest-map
 excerpt_separator:
 always_allow_html: true
@@ -14,282 +14,206 @@ toc: true
 header:
   og_image: "posts/nest-map/fig-1.png"
 tags:
-  - tidyverse
-  - data-science
-  - visualization
+  - VirtualBox
+  - Ubuntu 22 desktop, Bitnami opencart, and Ubuntu gateway
+  - Networking
+  - Sandboxing
 ---
 
-The combination of `tidyr::nest()` and `purrr:map()` can be used to
-easily fit the same model to different subsets of a single dataframe.
-There are [many](https://tidyr.tidyverse.org/articles/nest.html)
-[tutorials](https://www.monicathieu.com/posts/2020-04-08-tidy-multilevel)
-[available](https://r4ds.had.co.nz/many-models.html) to help guide you
-through this process. There are substantially fewer (none I’ve been able
-to find) that show you how to use these two functions to fit the same
-model to different features from your dataframe.
+## A basic network sandbox portfolio
+Network sandboxing refers to creating an isolated environment within a network to test and evaluate network traffic, applications, or systems without affecting the broader infrastructure or production environment.
+A sandboxed network provides an essential learning environment and platform, offering a secure, isolated space to simulate real-world network scenarios without introducing risks to live systems. For this portfolio, I will create a walk-through of my own private sandboxed virtual network using VirtualBox. The network will consist of multiple virtual machines (VMs) configured within a private IP address range. The aim is to gain an applied understanding of networking concepts, IP subnetting, network interface configuration, and basic server setup, design, planning and organisation strategies.
+Furthermore, the aim of creating a basic sandbox environment with an Ubuntu desktop, an Ubuntu gateway, and Bitnami OpenCart is to provide a secure, isolated space for testing and experimentation without affecting production systems or real-world data. This type of sandbox setup is typically used to evaluate software, analyze vulnerabilities, or experiment with configurations in a controlled, risk-free manner.
 
-While the former involves splitting your data into different subsets by
-row, the latter involves cycling through different columns.
I recently -confronted a problem where I had to run many models, including just one -predictor at a time from large pool of candidate predictors, while also -including a standard set of control variables in each.[^1] Given the -(apparent) absence of tutorials on fitting the same model to different -features from a dataframe using these functions, I decided to write up -the solution I reached in the hope it might be helpful to someone -else.[^2] Start by loading the following packages: - -``` r -library(tidyverse) -library(broom) -library(modelsummary) -library(kableExtra) -library(nationalparkcolors) -``` - -We’ll start with a recap of the subsetting approach, then build on it to -cycle through features instead of subsets of the data. This code is -similar to the [official tidyverse -tutorial](https://tidyr.tidyverse.org/articles/nest.html) above, but -pipes the output directly to a `ggplot()` call to visualize the results. - -``` r -mtcars %>% - nest(data = -cyl) %>% # split data by cylinders - mutate(mod = map(data, ~lm(mpg ~ disp + wt + am + gear, data = .x)), - out = map(mod, ~tidy(.x, conf.int = T))) %>% # tidy model to get coefs - unnest(out) %>% # unnest to access coefs - mutate(sig = sign(conf.low) == sign(conf.high), # p <= .05 - cyl = as.factor(cyl)) %>% # factor for nicer plotting - filter(term == 'disp') %>% - ggplot(aes(x = cyl, y = estimate, ymin = conf.low, ymax = conf.high, - color = sig)) + - geom_pointrange() + - geom_hline(yintercept = 0, lty = 2, color = 'grey60') + - scale_color_manual(name = 'Statistical significance', - labels = str_to_title, - values = park_palette('Saguaro')) + - labs(x = 'Cylinders', y = "Coefficient estimate") + - theme_bw() + - theme(legend.position = 'bottom') -``` - - - -# Multiple predictors - -The first thing we have to do is create a custom fuction because we now -need to be able to specify different predictors in different runs of the -model. The code below is very similar to the code above, except that -we’re defining the formula in `lm()` via the `formula()` function, which -parses a character object that we’ve assembled via `str_c()`. The net -effect of this is to fit a model where the `pred` argmument to -`func_var()` is the first predictor. This lets us use an external -function to supply different values to `pred`. Then we use -`broom::tidy()` to create a tidy dataframe of point estimates and -measures of uncertainty from the model and store them in a variable -called `out`. Finally, `mutate(pred = pred)` creates a variable named -`pred` in the output dataframe that records what the predictor used to -fit the model was. We could retrieve this from the `mod` list-column, -but this is approach is simpler both to extract the predictor -programtically and to visually inspect the data. We use then -`purr::map_dfr()` to generate a dataframe where each row corresponds to -a model with with a different predictor. - -``` r -func_var <- function(pred, dataset) { +Here's a breakdown of each component's role in achieving this aim: + +### Desktop Environment (Local Workstation) +The desktop serves as the local machine where the sandboxing process begins. It's a safe and isolated environment from which users can interact with the sandboxed components without compromising the underlying system or other networked systems. + +### Aim: +- Isolation: The desktop acts as the point of interaction with the sandbox environment, allowing users to control and monitor the sandbox without directly interacting with the host operating system or network. 
+- Testing: Allows developers and testers to execute software, make changes, and test functionality without impacting critical systems. +- Configuration and Deployment: The desktop is typically where the OpenCart application (via Bitnami's pre-packaged software) is configured and deployed, setting up a virtualized or containerized environment. + +### Gateway (Network Bridge) +The gateway acts as the network entry point, providing connectivity between the sandboxed environment and external networks (such as the internet or other isolated networked environments). This is typically managed via a firewall or network segmentation strategy, ensuring traffic flow is controlled and can be monitored. + +### Aim: +- Controlled Connectivity: The gateway allows the sandbox to connect to the internet or other external resources (for example, to access APIs, external databases, or perform updates), while also maintaining security and isolation from the broader network. +- Network Monitoring: Traffic passing through the gateway can be closely monitored to detect any unusual activity, vulnerabilities, or potential threats. +- Access Control: The gateway enforces access controls, ensuring that the sandboxed environment only communicates with allowed external systems or services, preventing any unwanted or malicious data leaks. + +### Bitnami OpenCart (Application Layer) +Bitnami OpenCart is an easy-to-install, pre-packaged version of the OpenCart e-commerce platform that can be run on various environments (virtual machines, containers, or cloud platforms). In this sandbox, it serves as the application being tested or experimented with. + +### Aim: +- Testing Application Configurations: OpenCart can be used in the sandbox to test new plugins, themes, updates, or integrations, such as payment gateways or shipping modules. These tests are done in a safe environment, preventing any potential issues from affecting a live production store. +- Performance and Load Testing: Sandboxing OpenCart in an isolated environment allows the simulation of real-world traffic or user interactions to evaluate the platform’s performance under various conditions. +- Security Analysis: This setup can be used to evaluate potential security vulnerabilities, such as testing OpenCart’s response to security exploits (e.g., SQL injections, cross-site scripting, etc.), and harden the system before deploying it to production. +- Simulated eCommerce Operations: The sandbox environment allows administrators to simulate real-world business operations—such as inventory management, payment processing, and order fulfillment—without any real financial or operational risks. + +### Steps +- Understand the network analysis - dataset %>% - nest(data = everything()) %>% - mutate(mod = map(data, ~lm(formula(str_c('mpg ~ ' , pred, # substitute pred - ' + wt + am + gear')), - data = .x)), - out = map(mod, ~tidy(.x, conf.int = T))) %>% - mutate(pred = pred) %>% - return() + -} - -## predictors of interest -preds <- c('disp', 'hp', 'drat') - -## fit models with different predictors -mods_var <- map_dfr(preds, function(x) func_var(x, mtcars)) - -## inspect -mods_var -``` - - ## # A tibble: 3 × 4 - ## data mod out pred - ## - ## 1 disp - ## 2 hp - ## 3 drat - -## Plots - -You can see our original dataframe that we condensed down into `data` -with `nest()`, the model object in `mod`, the tidied model output in -`out`, and finally the predictor used to fit the model in `pred`. 
Using -`unnest()`, we can unnest the `out` object and get a dataframe we can -use to plot the main coefficient estimate from each of our three models. - -``` r -mods_var %>% - unnest(out) %>% - mutate(sig = sign(conf.low) == sign(conf.high)) %>% - filter(term %in% preds) %>% - ggplot(aes(x = term, y = estimate, ymin = conf.low, ymax = conf.high, - color = sig)) + - geom_pointrange() + - geom_hline(yintercept = 0, lty = 2, color = 'grey60') + - scale_color_manual(name = 'Statistical significance', - labels = str_to_title, - values = park_palette('Saguaro')) + - labs(x = 'Predictor', y = "Coefficient estimate") + - theme_bw() + - theme(legend.position = 'bottom') -``` - - - -## Tables - -Things get slightly more complicated when we want to represent our -results textually instead of visually. We can use the excellent -`modelsummary::modelsummary()` function to create our table, but we need -to supply a list of model objects, rather than the unnested dataframe we -created above to plot the results. We can use the `split()` function to -turn our dataframe into a list, and by using `split(seq(nrow(.)))`, -we’ll create one list item for each row in our dataframe. - -Since each list item will be a one row dataframe, we can use `lapply()` -to cycle through the list. The `mod` object in each one row dataframe is -itself a list-column, so we need to index it with `[[1]]` to properly -access the model object itself.[^3] The last step is a call to -`unname()`, which will drop the automatically generated list item names -of `1`, `2`, and `3`, allowing `modelsummary()` to use the default names -for each model column in the output. - -``` r -tab_coef_map = c('disp' = 'Displacement', # format coefficient labels - 'hp' = 'Horsepower', - 'drat' = 'Drive ratio', - 'wt' = 'Weight (1000 lbs)', - 'am' = 'Manual', - 'gear' = 'Gears', - '(Intercept)' = '(Intercept)') - -mods_var %>% - split(seq(nrow(.))) %>% # list where each object is a one row dataframe - lapply(function(x) x$mod[[1]]) %>% # extract model from data dataframe - unname() %>% # remove names for default names in table - modelsummary(coef_map = tab_coef_map, stars = c('*' = .05)) -``` - - - -# Bonus - -Now, let’s combine both approaches. We’re going to be splitting our -dataframe into three sub-datasets by number of cylinders while *also* -fitting the same model three times with `'disp'`, `'hp'`, and `'drat'` -as predictors. The only changes to `func_var()` are to omit `cyl` from -the nesting, and to recode it as a factor to treat it as discrete axis -labels. 
-
-``` r
-func_var_obs <- function(pred, dataset) {
+
+- Make a sketch on packet-tracer:
+
+  Draw a rough sketch of your network diagram using either Cisco Packet Tracer, pen and paper, or GNS3 on Ubuntu
+
+- Create an IP table for the machines according to the network diagram and sketch
+
+| Device | Role | IP Address | Subnet Mask |
+|--------------|--------------|--------------|--------------|
+| Desktop VM | Management and deployment | 192.168.126.2 | 255.255.255.0 |
+| Gateway Router VM (enp0s3) | Internet Access to Ubuntu Gateway | 10.0.2.15 | 255.255.255.0 |
+| Gateway Router VM (enp0s8) | Subnet 01 - Access to internet, Gateway and Desktop | 192.168.126.1 | 255.255.255.0 |
+| Gateway Router VM (enp0s9) | Subnet 02 - Access to internet, Gateway and Bitnami Opencart | 192.168.26.1 | 255.255.255.0 |
+| Application Server VM | Server (Bitnami-Opencart) | 192.168.26.2 | 255.255.255.0 |
+
+- #### Desktop Configuration:
-
-  dataset %>% 
-    nest(data = -cyl) %>% 
-    mutate(mod = map(data, ~lm(formula(str_c('mpg ~ ' , pred,
-                                             ' + wt + am + gear')),
-                               data = .x)),
-           out = map(mod, ~tidy(.x, conf.int = T)),
-           cyl = as.factor(cyl),
-           pred = pred) %>% 
-    select(-data) %>% 
-    return()
+
+  Go to Settings, then click NETWORK and set ADAPTER 1 to INTERNAL NETWORK, then click OK and click START to boot up the machine on the VM.
+
+After starting up the machine, go to Settings and click on WIRED SETTINGS, then click on IDENTITY to confirm the adapter by its MAC address (08:00:27:97:75:31 (enp0s3)), then click IPV4 and set it to Manual to set the ADDRESS, NETMASK, GATEWAY and DNS. ADDRESS: 192.168.126.2 NETMASK: 255.255.255.0 GATEWAY: 192.168.126.1 and DNS: 8.8.8.8, 1.1.1.1
+Then click on APPLY and then DISCONNECT and RE-CONNECT the WIRED CONNECTION
+
+Open a command line terminal using “Ctrl + Alt + T” and type “ip a” to see if the IP address is set; if it is, continue, else re-do the above step and restart the Ubuntu desktop machine.
+
+- #### Gateway(Router) Configuration:
+  Go to Settings, then click NETWORK and set ADAPTER 1 to NAT, ADAPTER 2 for (desktop) to INTERNAL NETWORK and ADAPTER 3 for (opencart) to INTERNAL NETWORK
+
+Then click OK and click START to boot up the machine on the VM
+
+After starting up the machine use:
+
+**student@router:~$ sudo nano /etc/netplan/00-installer-config.yaml**
+to open the network configuration and set the ADDRESS, NETMASK, and GATEWAY for all three (3) ADAPTERS.
+
+Then use: student@router:~$ **sudo netplan apply** (to apply the new configuration)
+
+Then use: student@router:~$ **ip a** (to view all IPs)
+
+- #### Bitnami Opencart Server Configuration:
+  Go to Settings, then click NETWORK and set ADAPTER 1 to INTERNAL NETWORK, then click OK and click START to boot up the machine on the VM
+
+After starting up the machine use:
+bitnami@debian:~$ **sudo nano /etc/network/interfaces**
+to open the network interface configuration and set the ADDRESS, NETMASK, and GATEWAY.
+
+ADDRESS: 192.168.26.2 NETMASK: 255.255.255.0 GATEWAY: 192.168.26.1
+
+Then use **Ctrl + X** to exit, pressing Y when asked whether to save the changes.
+Use: bitnami@debian:~$ **sudo reboot now**
+
+After reboot use: bitnami@debian:~$ **ip a** to see if the IP address is set.
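+
+For reference, the resulting stanza in `/etc/network/interfaces` on the Bitnami (Debian) machine would look roughly like this (a sketch based on the addresses above; the interface name eth0 is an assumption, so check `ip a` for the real one):
+
+```
+auto eth0
+iface eth0 inet static
+    address 192.168.26.2
+    netmask 255.255.255.0
+    gateway 192.168.26.1
+```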
+
+
+
+### Machines Functionality
+This section demonstrates the interaction and communication between all three (3) machines
+
+- #### Desktop Functionality
+Ping from the desktop (192.168.126.2) to the Bitnami OpenCart application server (192.168.26.2)
+
+Ping from the desktop (192.168.126.2) to the Ubuntu gateway (192.168.126.1)
+
+Access the OpenCart e-commerce site using the desktop web browser and IP address 192.168.26.2
+
+Access the Google site on the desktop web browser using www.google.com to confirm internet connectivity
+
+Use ssh student@192.168.126.1 to access the gateway from the desktop
+
+- #### Gateway Functionality
+Ping from the Ubuntu gateway (192.168.126.1) to the desktop (192.168.126.2)
+
+Ping from the Ubuntu gateway (192.168.26.1) to the Bitnami OpenCart application server (192.168.26.2)
+
+Use nslookup bbc.co.uk to resolve the BBC's IP addresses and confirm DNS resolution
+
+- #### Bitnami Opencart Functionality
+Ping from OpenCart (192.168.26.2) to the desktop (192.168.126.2)
+
+Ping from the Bitnami OpenCart application server (192.168.26.2) to the Ubuntu gateway (192.168.26.1)
+
+### Video
+If you find it difficult to play or watch this video directly, please click the **Watch Video Link** below