Agenda :
yum install -y wget
yum install -y libxkbcommon-x11
yum install -y xcb-util-keysyms
yum install -y xcb-util-wm
yum install -y xcb-util-image
yum install -y xcb-util-renderutil
yum install -y freeglut-devel
Install using 'wsl --install -d <Distro>'. Available distributions:
NAME                            FRIENDLY NAME
Ubuntu                          Ubuntu
Debian                          Debian GNU/Linux
kali-linux                      Kali Linux Rolling
Ubuntu-18.04                    Ubuntu 18.04 LTS
Ubuntu-20.04                    Ubuntu 20.04 LTS
Ubuntu-22.04                    Ubuntu 22.04 LTS
Ubuntu-24.04                    Ubuntu 24.04 LTS
OracleLinux_7_9                 Oracle Linux 7.9
OracleLinux_8_7                 Oracle Linux 8.7
OracleLinux_9_1                 Oracle Linux 9.1
openSUSE-Leap-15.6              openSUSE Leap 15.6
SUSE-Linux-Enterprise-15-SP5    SUSE Linux Enterprise 15 SP5
SUSE-Linux-Enterprise-15-SP6    SUSE Linux Enterprise 15 SP6
openSUSE-Tumbleweed             openSUSE Tumbleweed
Username : me Password : wachtwoord_voor_me
[network]
generateResolvConf = false
localhostForwarding = true

[boot]
systemd=true
Oracle Linux 9:
dnf install -y oracle-epel-release-el9
dnf config-manager --enable ol9_developer_EPEL

Oracle Linux 8:
yum install -y oracle-epel-release-el8
yum install -y yum-utils
yum-config-manager --enable ol8_baseos_latest ol8_appstream ol8_addons ol8_developer_EPEL

dnf install -y podman
dnf install -y podman-compose

# Optionally, if not installed earlier:
dnf install -y xrdp
systemctl enable xrdp
systemctl start xrdp
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.
set -eu
red="$( (/usr/bin/tput bold || :; /usr/bin/tput setaf 1 || :) 2>&-)"
plain="$( (/usr/bin/tput sgr0 || :) 2>&-)"
status() { echo ">>> $*" >&2; }
error() { echo "${red}ERROR:${plain} $*"; exit 1; }
warning() { echo "${red}WARNING:${plain} $*"; }
TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf $TEMP_DIR; }
trap cleanup EXIT
available() { command -v $1 >/dev/null; }
require() {
local MISSING=''
for TOOL in $*; do
if ! available $TOOL; then
MISSING="$MISSING $TOOL"
fi
done
echo $MISSING
}
[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'
ARCH=$(uname -m)
case "$ARCH" in
x86_64) ARCH="amd64" ;;
aarch64|arm64) ARCH="arm64" ;;
*) error "Unsupported architecture: $ARCH" ;;
esac
IS_WSL2=false
KERN=$(uname -r)
case "$KERN" in
*icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
*icrosoft) error "Microsoft WSL1 is not currently supported. Please use WSL2 with 'wsl --set-version 2'" ;;
*) ;;
esac
VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"
SUDO=
if [ "$(id -u)" -ne 0 ]; then
# Not running as root, so sudo is required
if ! available sudo; then
error "This script requires superuser permissions. Please re-run as root."
fi
SUDO="sudo"
fi
NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
status "ERROR: The following tools are required but missing:"
for NEED in $NEEDS; do
echo " - $NEED"
done
exit 1
fi
for BINDIR in /usr/local/bin /usr/bin /bin; do
echo $PATH | grep -q $BINDIR && break || continue
done
OLLAMA_INSTALL_DIR=$(dirname ${BINDIR})
if [ -d "$OLLAMA_INSTALL_DIR/lib/ollama" ] ; then
status "Cleaning up old version at $OLLAMA_INSTALL_DIR/lib/ollama"
$SUDO rm -rf "$OLLAMA_INSTALL_DIR/lib/ollama"
fi
status "Installing ollama to $OLLAMA_INSTALL_DIR"
$SUDO install -o0 -g0 -m755 -d $BINDIR
$SUDO install -o0 -g0 -m755 -d "$OLLAMA_INSTALL_DIR"
status "Downloading Linux ${ARCH} bundle"
curl --fail --show-error --location --progress-bar \
"https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" | \
$SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then
status "Making ollama accessible in the PATH in $BINDIR"
$SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
fi
# Check for NVIDIA JetPack systems with additional downloads
if [ -f /etc/nv_tegra_release ] ; then
if grep R36 /etc/nv_tegra_release > /dev/null ; then
status "Downloading JetPack 6 components"
curl --fail --show-error --location --progress-bar \
"https://ollama.com/download/ollama-linux-${ARCH}-jetpack6.tgz${VER_PARAM}" | \
$SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
elif grep R35 /etc/nv_tegra_release > /dev/null ; then
status "Downloading JetPack 5 components"
curl --fail --show-error --location --progress-bar \
"https://ollama.com/download/ollama-linux-${ARCH}-jetpack5.tgz${VER_PARAM}" | \
$SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
else
warning "Unsupported JetPack version detected. GPU may not be supported"
fi
fi
install_success() {
status 'The Ollama API is now available at 127.0.0.1:11434.'
status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT
# Everything from this point onwards is optional.
configure_systemd() {
if ! id ollama >/dev/null 2>&1; then
status "Creating ollama user..."
$SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
fi
if getent group render >/dev/null 2>&1; then
status "Adding ollama user to render group..."
$SUDO usermod -a -G render ollama
fi
if getent group video >/dev/null 2>&1; then
status "Adding ollama user to video group..."
$SUDO usermod -a -G video ollama
fi
status "Adding current user to ollama group..."
$SUDO usermod -a -G ollama $(whoami)
status "Creating ollama systemd service..."
# cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
#[Unit]
#Description=Ollama Service
#After=network-online.target
#
#[Service]
#ExecStart=$BINDIR/ollama serve
#User=ollama
#Group=ollama
#Restart=always
#RestartSec=3
#Environment="PATH=$PATH"
#
#[Install]
#WantedBy=default.target
#EOF
# SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
# case $SYSTEMCTL_RUNNING in
# running|degraded)
# status "Enabling and starting ollama service..."
# $SUDO systemctl daemon-reload
# $SUDO systemctl enable ollama
#
# start_service() { $SUDO systemctl restart ollama; }
# trap start_service EXIT
# ;;
# *)
# warning "systemd is not running"
# if [ "$IS_WSL2" = true ]; then
# warning "see https://learn.microsoft.com/en-us/windows/wsl/systemd#how-to-enable-systemd to enable it"
# fi
# ;;
# esac
echo "STARTING SERVE"
/usr/local/bin/ollama serve &
}
#if available systemctl; then
configure_systemd
#fi
# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
status "Nvidia GPU detected."
fi
install_success
exit 0
fi
# Don't attempt to install drivers on Jetson systems
if [ -f /etc/nv_tegra_release ] ; then
status "NVIDIA JetPack ready."
install_success
exit 0
fi
# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
exit 0
fi
check_gpu() {
# Look for devices based on vendor ID for NVIDIA and AMD
case $1 in
lspci)
case $2 in
nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
esac ;;
lshw)
case $2 in
nvidia) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
amdgpu) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[1002\]' || return 1 ;;
esac ;;
nvidia-smi) available nvidia-smi || return 1 ;;
esac
}
if check_gpu nvidia-smi; then
status "NVIDIA GPU installed."
exit 0
fi
if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then
install_success
warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode."
exit 0
fi
if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
status "Downloading Linux ROCm ${ARCH} bundle"
curl --fail --show-error --location --progress-bar \
"https://ollama.com/download/ollama-linux-${ARCH}-rocm.tgz${VER_PARAM}" | \
$SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
install_success
status "AMD GPU ready."
exit 0
fi
CUDA_REPO_ERR_MSG="NVIDIA GPU detected, but your OS and Architecture are not supported by NVIDIA. Please install the CUDA driver manually https://docs.nvidia.com/cuda/cuda-installation-guide-linux/"
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
install_cuda_driver_yum() {
status 'Installing NVIDIA repository...'
case $PACKAGE_MANAGER in
yum)
$SUDO $PACKAGE_MANAGER -y install yum-utils
if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then
$SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo
else
error $CUDA_REPO_ERR_MSG
fi
;;
dnf)
if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then
$SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo
else
error $CUDA_REPO_ERR_MSG
fi
;;
esac
case $1 in
rhel)
status 'Installing EPEL repository...'
# EPEL is required for third-party dependencies such as dkms and libvdpau
$SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
;;
esac
status 'Installing CUDA driver...'
if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
$SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
fi
$SUDO $PACKAGE_MANAGER -y install cuda-drivers
}
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
install_cuda_driver_apt() {
status 'Installing NVIDIA repository...'
if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb" >/dev/null ; then
curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb
else
error $CUDA_REPO_ERR_MSG
fi
case $1 in
debian)
status 'Enabling contrib sources...'
$SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
$SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
fi
;;
esac
status 'Installing CUDA driver...'
$SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
$SUDO apt-get update
[ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
}
if [ ! -f "/etc/os-release" ]; then
error "Unknown distribution. Skipping CUDA installation."
fi
. /etc/os-release
OS_NAME=$ID
OS_VERSION=$VERSION_ID
PACKAGE_MANAGER=
for PACKAGE_MANAGER in dnf yum apt-get; do
if available $PACKAGE_MANAGER; then
break
fi
done
if [ -z "$PACKAGE_MANAGER" ]; then
error "Unknown package manager. Skipping CUDA installation."
fi
if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
case $OS_NAME in
centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
fedora) [ $OS_VERSION -lt '39' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '39';;
amzn) install_cuda_driver_yum 'fedora' '37' ;;
debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
*) exit ;;
esac
fi
if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
KERNEL_RELEASE="$(uname -r)"
case $OS_NAME in
rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
centos|rhel|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;;
debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
*) exit ;;
esac
NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
if [ -n "$NVIDIA_CUDA_VERSION" ]; then
$SUDO dkms install $NVIDIA_CUDA_VERSION
fi
if lsmod | grep -q nouveau; then
status 'Reboot to complete NVIDIA CUDA driver install.'
exit 0
fi
$SUDO modprobe nvidia
$SUDO modprobe nvidia_uvm
fi
# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if available nvidia-persistenced; then
$SUDO touch /etc/modules-load.d/nvidia.conf
MODULES="nvidia nvidia-uvm"
for MODULE in $MODULES; do
if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
echo "$MODULE" | $SUDO tee -a /etc/modules-load.d/nvidia.conf > /dev/null
fi
done
fi
status "NVIDIA GPU ready."
install_success
echo "DONE "
export PODMAN_IGNORE_CGROUPSV1_WARNING=1
podman stop Ollama
podman rm Ollama
podman run --name=Ollama -p 11434:11434 -v /home/podman/ollama:/ollama:z -t -d container-registry.oracle.com/os/oraclelinux:8 /bin/bash -c "yum install -y tar net-tools && sleep infinity"
echo "Wait for linux to start "
sleep 90
podman exec -d Ollama /bin/bash -c "chmod +x /ollama/install.sh && sh /ollama/install.sh "
echo "Wait for ollama to start serving "
sleep 180
podman exec -d Ollama /bin/bash -c " export OLLAMA_HOST=0.0.0.0 && /usr/local/bin/ollama serve && sleep infinity "
sleep 120
#podman exec -d Ollama /bin/bash -c " /usr/local/bin/ollama pull llama3.2:1b "
podman exec -d Ollama /bin/bash -c " /usr/local/bin/ollama pull tinydolphin "
sleep 120
#echo "interactive ollama run llama3.2:1b 'Hello'"
echo "interactive ollama run tinydolphin 'Hello'"
echo " podman exec -ti Ollama /bin/bash "
podman exec -ti Ollama /bin/bash

echo "command line version: "
#ollama run llama3.2:1b "hello"
ollama run tinydolphin "hello"
echo "Interactive version "
echo " Search and use /bye to stop "
#ollama run llama3.2:1b
ollama run tinydolphin
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
6956 ollama 20 0 23.0g 20.6g 11576 S 1567 32.9 38:40.44 ollama_lla+
Error: model requires more system memory (134.7 GiB) than is available (92.8 GiB)
Hello! How can I assist you today? 😊
total duration:       9.351457242s
load duration:        24.059731ms
prompt eval count:    4 token(s)
prompt eval duration: 788ms
prompt eval rate:     5.08 tokens/s
eval count:           16 token(s)
eval duration:        8.537s
eval rate:            1.87 tokens/s

Dolphin is happy to hear that you've reached out for assistance. Please feel free to share more details about the situation or tasks you would like me to assist with.
total duration:       1.03732973s
load duration:        7.514242ms
prompt eval count:    30 token(s)
prompt eval duration: 27ms
prompt eval rate:     1111.11 tokens/s
eval count:           37 token(s)
eval duration:        1.001s
eval rate:            36.96 tokens/s
podman inspect --format='{{.NetworkSettings.IPAddress}}' Open
podman inspect --format='{{.NetworkSettings.IPAddress}}' Ollama
[root@DESKTOP-2V6PGAI ~]# podman inspect --format='{{.NetworkSettings.IPAddress}}' Open
10.88.0.29
[root@DESKTOP-2V6PGAI ~]# podman inspect --format='{{.NetworkSettings.IPAddress}}' Ollama
10.88.0.2
export PODMAN_IGNORE_CGROUPSV1_WARNING=1
podman stop Open
podman rm -f Open
mkdir /home/podman/Open
podman run -d -e WEBUI_AUTH=False -p 3000:8080 -e OLLAMA_BASE_URL=http://10.88.0.2:11434 -v /home/podman/Open:/app/backend/data --name Open ghcr.io/open-webui/open-webui:main
echo " Sleep while starting OpenWebUI "
sleep 60
podman exec -t Open /bin/bash -c "apt update -y && apt install -y net-tools curl wget inetutils-ping procps"
echo " Sleep while starting OpenWebUI "
sleep 60
systemctl stop firewalld
iptables -F
echo "Open browser : http://localhost:3000 "
Debugging inside the Open WebUI container, which is a Debian environment:
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=/usr/local/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
Environment="OLLAMA_HOST=0.0.0.0"

[Install]
WantedBy=default.target
export PODMAN_IGNORE_CGROUPSV1_WARNING=1
podman stop Node; podman rm Node; mkdir -p /home/podman/Node
podman run --name=Node -v /home/podman/Node:/Node:z -t -d container-registry.oracle.com/os/oraclelinux:8 /bin/bash -c " sleep infinity "
echo "Start: "
echo "podman exec -ti Node /bin/bash"
podman exec -ti Node /bin/bash -c "
  dnf install nodejs -y
  dnf install npm -y
  node -v
  npm -i nodejs
  echo ' RUN : podman exec -ti Node /bin/bash '
"
yum install -y atk alsa-lib.x86_64 lib-atk atk.x86_64 cups-libs.x86_64 gtk3.x86_64 ipa-gothic-fonts libXcomposite.x86_64 libXcursor.x86_64 libXdamage.x86_64 libXext.x86_64 libXi.x86_64 libXrandr.x86_64 libXScrnSaver.x86_64 libXtst.x86_64 pango.x86_64 xorg-x11-fonts-100dpi xorg-x11-fonts-75dpi xorg-x11-fonts-cyrillic xorg-x11-fonts-misc xorg-x11-fonts-Type1 xorg-x11-utils
yum install -y nss
yum install -y atk java-atk-wrapper at-spi2-atk gtk3 libXt libdrm mesa-libgbm
dnf install -y atk cups-libs libdrm at-spi2-core libX11 libXcomposite libXdamage libXext libXfixes libXrandr mesa-libgbm libxcb libxkbcommon pango cairo alsa-lib at-spi2-atk

npm install axios
npm install cheerio
npm install puppeteer
npm install apify
npm install crawlee

export CRAWLEE_DISABLE_BROWSER_SANDBOX=1
node ap.js
import * as fs from 'fs';
import { Actor } from 'apify';
import { CheerioCrawler, RequestQueue, htmlToText, downloadListOfUrls } from 'crawlee';
var sitemapUrls=[];
var counter=0;
var server="photoquesting.com";
var path="TODO";
server="www.reclassering.nl";
path="sitemap_index.xml";
//http://www.reclassering.nl/sitemap_index.xml
const requestQueue = await RequestQueue.open();
await Actor.init();
const crawler = new CheerioCrawler({
minConcurrency: 1, // Minimal concurrency to avoid overwhelming the server
maxConcurrency: 1, // Ensures requests happen one at a time
requestHandlerTimeoutSecs: 30,
maxRequestsPerCrawl: 10000,
maxRequestRetries: 5, // Retry requests up to 5 times
// Add delay between requests
preNavigationHooks: [
async () => {
const delay = Math.random() * (2000 - 1000) + 1000; // Random delay between 1-2 seconds
console.log(`Delaying request by ${Math.round(delay)} ms...`);
await new Promise((resolve) => setTimeout(resolve, delay));
},
],
requestHandler: async ({ request, body, enqueueLinks }) => {
if (request.url.indexOf("sitemap") < 0) {
if(request.url.indexOf("pdf") < 0 && request.url.indexOf("png") < 0 && request.url.indexOf("gif") < 0 ){
console.log("Write counter: ", counter);
const text = htmlToText(body);
fs.writeFileSync(`data/${counter}.txt`, text);
counter++;
}
} else {
// Sitemap XML: split on the <loc> tags and queue every URL found
const dataSplit = body.split("<loc>");
for (let d = 0; d < dataSplit.length; d++) {
const goUrl = dataSplit[d];
if (!goUrl) continue;
const url = goUrl.split("</loc>")[0];
if (url.indexOf("xml") > -1) continue;
await requestQueue.addRequest({ url });
}
}
// Add all links from the page to the request queue
await enqueueLinks();
},
failedRequestHandler: async ({ request }) => {
console.error(`Request ${request.url} failed multiple times.`);
},
});
// Fetch list of URLs
const listOfUrls = await downloadListOfUrls({ url: `http://${server}/${path}` });
const browseUrls = listOfUrls.filter((u) => u && u.indexOf(server) >= 0);
console.log("browserUrls: ", browseUrls);
// Process in batches with a delay between each batch
const batchSize = 10; // Number of URLs to process per batch
const delayBetweenBatches = 30000; // 30 seconds delay between batches
for (let i = 0; i < browseUrls.length; i += batchSize) {
const batch = browseUrls.slice(i, i + batchSize);
console.log(`Processing batch ${Math.ceil(i / batchSize) + 1}`);
await crawler.run(batch);
if (i + batchSize < browseUrls.length) {
console.log(`Pausing for ${delayBetweenBatches / 1000} seconds before the next batch...`);
await new Promise((resolve) => setTimeout(resolve, delayBetweenBatches));
}
}
wsl -- ip -o -4 addr list eth0
eth0 inet 172.20.18.157/20 brd 172.20.31.255 scope global eth0\ valid_lft forever preferred_lft forever
Use this address to query the API:
#curl http://172.20.18.157:11434/api/generate -d "{\"model\":\"llama3.2:1b\",\"prompt\":\"hello\"}"
curl http://172.20.18.157:11434/api/generate -d "{\"model\":\"tinydolphin\",\"prompt\":\"hello\"}"
On the Acer, with curl:
curl http://localhost:11434/api/generate -d "{\"model\":\"tinydolphin\",\"prompt\":\"hello\"}"
rpm --import https://packages.microsoft.com/keys/microsoft.asc
echo -e "[code]\nname=Visual Studio Code\nbaseurl=https://packages.microsoft.com/yumrepos/vscode\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.microsoft.com/keys/microsoft.asc" | sudo tee /etc/yum.repos.d/vscode.repo > /dev/null
dnf install -y code   # or code-insiders
Put this in the configuration of the Continue VS Code extension (config.json); the models section points at the local Ollama instance:
{
  "models": [
    {
      "apiBase": "http://localhost:11434/",
      "model": "tinydolphin",
      "provider": "ollama",
      "title": "MyOwn"
    }
  ],
  "tabAutocompleteModel": {
    "title": "tinydolphin",
    "provider": "ollama",
    "model": "tinydolphin"
  },
  "embeddingsProvider": {
    "provider": "transformers.js"
  }
}

When VS Code runs on Windows and Ollama runs inside WSL, point apiBase at the WSL address instead:
"models": [ { "apiBase": "http://172.20.18.157:11434/", "model": "tinydolphin", "provider": "ollama", "title": "MyOwn" } ]
Write an index.html with a title 'Hello World' and a body with a button with a label 'Click Me' and when pressed should alert the following code 'You Hit Me'.
python3 demo.py --prompt "A girl riding a horse on the beach with fish flying in the air. Realistic style "
export PODMAN_IGNORE_CGROUPSV1_WARNING=1
podman stop BesDev; podman rm BesDev; mkdir -p /home/podman/BesDev
podman run --name=BesDev -v /home/podman/BesDev:/BesDev:z -t -d container-registry.oracle.com/os/oraclelinux:8 /bin/bash -c " sleep infinity "
echo "Start: "
echo "podman exec -ti BesDev /bin/bash"
echo " cd /BesDev; sh install.sh "
cd /home/podman/BesDev
dnf install -y git
dnf install -y pip
yum install -y python3.9
yum install -y npm
npm install pip
cp /usr/bin/python3.9 /usr/bin/python
python -V
git clone https://github.com/bes-dev/stable_diffusion.openvino.git
cd stable_diffusion.openvino/
pip3 install openvino==2024.6.0
pip3 install openvino-dev[onnx,pytorch]==2024.6.0
dnf install -y mesa-libGL
pip3 install torch
echo " change requirements "
#export PYTHONPATH=$PWD:$PYTHONPATH #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
class Neuron {
constructor() {
this.inputConnections = []
this.outputConnections = []
this.bias = 0
// delta is used to store a percentage of change in the weight
this.delta = 0
this.output = 0
this.error = 0
}
getRandomBias() {
const min = -3;
const max = 3
return Math.floor(Math.random() * (+max - +min)) +min;
}
addInputConnection(connection) {
this.inputConnections.push(connection)
}
addOutputConnection(connection) {
this.outputConnections.push(connection)
}
setBias(val) {
this.bias = val
}
setOutput(val) {
this.output = val
}
setDelta(val) {
this.delta = val
}
setError(val) {
this.error = val
}
}
// End Of Neuron
class Layer {
constructor(numberOfNeurons) {
const neurons = []
for (var j = 0; j < numberOfNeurons; j++) {
// const value = Math.random()
const neuron = new Neuron()
// Neurons in other than initial layer have a bias value
neurons.push(neuron)
}
this.neurons = neurons
}
}
// End Of Layer
// Connect Layers
class Connection {
constructor(from, to) {
this.from = from
this.to = to
this.weight = Math.random()
this.change = 0
}
setWeight(w) {
this.weight = w
}
setChange(val) {
this.change = val
}
}
// End Of Connection
class Network {
constructor(numberOfLayers) {
this.layers = numberOfLayers.map((length, index) => {
const layer = new Layer(length)
if (index !== 0 ) {
layer.neurons.forEach(neuron => {
neuron.setBias(neuron.getRandomBias())
})
}
return layer
})
this.learningRate = 0.3 // multiplied by the input and the delta, then added to the momentum term for the weight change
this.momentum = 0.1 // multiplied by the previous "change", then added to the learning-rate term for the new change
this.iterations = 0
this.connectLayers()
}
setLearningRate(value) {
this.learningRate = value
}
setIterations(val) {
this.iterations = val
}
connectLayers() {
for (var layer = 1; layer < this.layers.length; layer++) {
const thisLayer = this.layers[layer]
const prevLayer = this.layers[layer - 1]
for (var neuron = 0; neuron < prevLayer.neurons.length; neuron++) {
for(var neuronInThisLayer = 0; neuronInThisLayer < thisLayer.neurons.length; neuronInThisLayer++) {
const connection = new Connection(prevLayer.neurons[neuron], thisLayer.neurons[neuronInThisLayer])
prevLayer.neurons[neuron].addOutputConnection(connection)
thisLayer.neurons[neuronInThisLayer].addInputConnection(connection)
}
}
}
}
// When training we will run this set of functions each time
train(input, output) {
this.activate(input)
// Forward propagate
this.forwardPropagation()
// backpropagate
this.backwardPropagation(output)
this.adjustWeights()
this.setIterations(this.iterations + 1)
}
activate(values) {
this.layers[0].neurons.forEach((n, i) => {
n.setOutput(values[i])
})
}
run() {
// For now we only use sigmoid function
return this.forwardPropagation()
}
forwardPropagation() {
for (var layer = 1; layer < this.layers.length; layer++) {
for (var neuron = 0; neuron < this.layers[layer].neurons.length; neuron++) {
const bias = this.layers[layer].neurons[neuron].bias
// For each neuron in this layer we compute its output value
const connectionsValue = this.layers[layer].neurons[neuron].inputConnections.reduce((prev, conn) => {
const val = conn.weight * conn.from.output
return prev + val
}, 0)
this.layers[layer].neurons[neuron].setOutput(sigmoid(bias + connectionsValue))
}
}
return this.layers[this.layers.length - 1].neurons.map(n => n.output)
}
backwardPropagation(target) {
for (let layer = this.layers.length - 1; layer >= 0; layer--) {
const currentLayer = this.layers[layer]
for (let neuron = 0; neuron < currentLayer.neurons.length; neuron++) {
const currentNeuron = currentLayer.neurons[neuron]
let output = currentNeuron.output;
let error = 0;
if (layer === this.layers.length -1 ) {
// Is output layer
error = target[neuron] - output;
// console.log('calculate delta, error, last layer', error)
}
else {
// Other than output layer
for (let k = 0; k < currentNeuron.outputConnections.length; k++) {
const currentConnection = currentNeuron.outputConnections[k]
error += currentConnection.to.delta * currentConnection.weight
// console.log('calculate delta, error, inner layer', error)
}
}
currentNeuron.setError(error)
currentNeuron.setDelta(error * output * (1 - output))
}
}
}
adjustWeights() {
for (let layer = 1; layer <= this.layers.length -1; layer++) {
const prevLayer = this.layers[layer - 1]
const currentLayer = this.layers[layer]
for (let neuron = 0; neuron < currentLayer.neurons.length; neuron++) {
const currentNeuron = currentLayer.neurons[neuron]
let delta = currentNeuron.delta
for (let i = 0; i < currentNeuron.inputConnections.length; i++) {
const currentConnection = currentNeuron.inputConnections[i]
let change = currentConnection.change
change = (this.learningRate * delta * currentConnection.from.output)
+ (this.momentum * change);
currentConnection.setChange(change)
currentConnection.setWeight(currentConnection.weight + change)
}
currentNeuron.setBias(currentNeuron.bias + (this.learningRate * delta))
}
}
}
}
// End Of Network
function sigmoid(z) {
return 1 / (1 + Math.exp(-z));
}
// End Of Functions
//
// -------------------------
//
// Main
//
// Define the layer structure
const layers = [
2, // This is the input layer
10, // Hidden layer 1
10, // Hidden layer 2
1 // Output
]
const network = new Network(layers)
// Start training
const numberOfIterations = 20000
// Training data for a "XOR" logic gate
const trainingData = [{
input : [0,0],
output: [0]
}, {
input : [0,1],
output: [1]
}, {
input : [1,0],
output: [1]
}, {
input : [1,1],
output: [0]
}]
for(var i = 0; i < numberOfIterations; i ++) {
// Get a random training sample
const trainingItem = trainingData[Math.floor((Math.random()*trainingData.length))]
network.train(trainingItem.input, trainingItem.output);
}
// After training we can see if it works
// we call activate to set a input in the first layer
network.activate(trainingData[0].input)
const resultA = network.run()
network.activate(trainingData[1].input)
const resultB = network.run()
network.activate(trainingData[2].input)
const resultC = network.run()
network.activate(trainingData[3].input)
const resultD = network.run()
console.log('Expected 0 got', resultA[0])
console.log('Expected 1 got', resultB[0])
console.log('Expected 1 got', resultC[0])
console.log('Expected 0 got', resultD[0])
cd /Node
mkdir Brain
cd Brain
mkdir xor
cd xor
const brain = require('brain.js');
const net = new brain.NeuralNetwork();
// You can specify here how many hidden layers there are
// const net = new brain.NeuralNetwork({ hiddenLayers: [20] });
net.train([
{ input: [0, 0], output: [0] },
{ input: [0, 1], output: [1] },
{ input: [1, 0], output: [1] },
{ input: [1, 1], output: [0] },
]);
console.log("xor 1,0 : ",
net.run([1, 0]), " If it is almost 1, then it is 1 "
);
----
const brain = require('brain.js');
const trainingData = [
'Jane saw Doug.',
'Doug saw Jane.',
'Spot saw Doug and Jane looking at each other.',
'It was love at first sight, and Spot had a frontrow seat. It was a very special moment'
];
// same options as above, e.g. hiddenLayers
const net = new brain.recurrent.LSTM();
console.log("TRAIN");
net.train(trainingData, {
iterations: 1500,
errorThresh: 0.011,
log: true,
logPeriod: 10
});
console.log("Jane", net.run('Jane'));
console.log("It was", net.run('It was'));
// Show errorThresh and iterations difference
----
//
// Reinforcement learning
//
//
// Source: https://www.youtube.com/watch?v=6E6XecoTRVo
const brain = require('brain.js');
const trainingData =[
{ input: [0, 0], output: [0] },
{ input: [0, 1], output: [1] },
//{ input: [1, 0], output: [1] },
//{ input: [1, 1], output: [0] },
];
const net = new brain.NeuralNetwork({hiddenLayers: [3] });
net.train(trainingData);
console.log(" Voor reinforcement (het systeem heeft deze data nog niet gezien) " )
console.log(Array.from(net.run([1,0])));
trainingData.push({ input: [1,0], output: [1] });
net.train(trainingData);
console.log(" Na reinforcement " )
console.log(Array.from(net.run([1,0])));
---- Latent diffusion models (LDMs) are in essence compression: 192x192x3 pixels are reduced to a much smaller 32x32x3 latent set.
--- Conditional: use a dataset, and possibly a script that generates text prompts for the features. So: a man or a woman, with long or short hair, smiling, crying, etc.
Text embedding is based on a CLIP model. The tokenizer (which creates unique, searchable labels) turns "young smiling woman" into "young", "smiling", "woman". That goes to the Text Encoder (an LSTM, see the Brain.js examples) and yields a number of embedded vectors (numbers). There are then hidden layers of the Text Encoder and hidden layers of the Image Encoder, and the 'dot product' of the two is computed.
Dot product, by way of a shopping list:
3 apples x 2 euro = 6 euro
2 eggs   x 1 euro = 2 euro
5 loaves x 3 euro = 15 euro
Add it all up: 23 euro <- that is a dot product, and it can be shown as a kind of spreadsheet:
Row 1: [ 3, 2, 5 ]  the quantities
Row 2: [ 2, 1, 3 ]  the prices
So add up (row 1.column 1 x row 2.column 1) + (row 1.column 2 x row 2.column 2) + (row 1.column 3 x row 2.column 3).
Search terms: "diffusion model conditional nodejs brainjs noise forward diffusion ddpm scheduler reverse diffusion u-net architecture convolution network text embedding large training set labels"
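A minimal Python sketch of the same calculation; the embedding vectors at the end are made up here purely for illustration and do not come from a real CLIP model:

import numpy as np

# The shopping list from above: quantities dot prices = total cost
quantities = np.array([3, 2, 5])   # 3 apples, 2 eggs, 5 loaves
prices     = np.array([2, 1, 3])   # price in euro per item
print(quantities @ prices)         # 3*2 + 2*1 + 5*3 = 23

# The same operation on two (made-up) embedding vectors: a larger dot product
# means the text vector and the image vector point in a more similar direction.
text_vec  = np.array([0.9, 0.1, 0.3])   # hypothetical embedding of "young smiling woman"
image_vec = np.array([0.8, 0.2, 0.1])   # hypothetical image embedding
print(float(text_vec @ image_vec))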
Python:
import torch
from diffusers import DDPMScheduler

max_timesteps = 10
noise_scheduler = DDPMScheduler(num_train_timesteps=max_timesteps,
                                beta_start=0.00001,
                                beta_end=0.02)
# which image? Where is the method? (see the load_img sketch below)
image = torch.tensor(load_img())
img_shape = image.shape
timesteps = torch.arange(1, max_timesteps)
# add_noise expects one sample per timestep, so repeat the image along a batch dimension
images = image.unsqueeze(0).repeat(len(timesteps), 1, 1, 1)
noise = torch.randn((len(timesteps), *img_shape))
noise_images = noise_scheduler.add_noise(images, noise, timesteps).numpy()
plot_images(timesteps, noise_images)
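The snippet assumes two helpers, load_img and plot_images, that are not defined anywhere in these notes. A minimal sketch of what they could look like (the file name input.png, the 64x64 size, and the use of PIL and matplotlib are assumptions made here purely for illustration):

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

def load_img(path="input.png", size=(64, 64)):
    # Load one RGB image and scale its pixel values to [-1, 1]
    img = Image.open(path).convert("RGB").resize(size)
    return np.asarray(img).astype("float32") / 127.5 - 1.0

def plot_images(timesteps, noisy_images):
    # One panel per timestep, rescaled back to [0, 1] for display
    fig, axes = plt.subplots(1, len(timesteps), figsize=(2 * len(timesteps), 2))
    for ax, t, img in zip(axes, timesteps, noisy_images):
        ax.imshow(((img + 1.0) / 2.0).clip(0, 1))
        ax.set_title(f"t={int(t)}")
        ax.axis("off")
    plt.show()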
Python code :
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from diffusers import UNet2DModel, DDPMScheduler
# Download and prepare dataset
DATASET_URL = "https://storage.googleapis.com/mnist-dataset/mnist_png.tar.gz"
DATA_DIR = "./mnist_data"
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR, exist_ok=True)
    os.system(f"wget {DATASET_URL} -O mnist.tar.gz")
    os.system("tar -xzf mnist.tar.gz -C ./mnist_data")
# Define image transformations
transform = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
# Load the dataset
dataset = datasets.ImageFolder(root=os.path.join(DATA_DIR, "mnist_png", "training"), transform=transform)
data_loader = DataLoader(dataset, batch_size=64, shuffle=True)
# Define the U-Net model
model = UNet2DModel(
    sample_size=28,   # Image size (28x28 for MNIST)
    in_channels=1,    # Grayscale input
    out_channels=1,   # Grayscale output
    layers_per_block=2,
    block_out_channels=(64, 128, 256),  # Number of channels in each U-Net block
    down_block_types=("DownBlock2D", "DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D", "UpBlock2D"),
)
# Define the diffusion scheduler
scheduler = DDPMScheduler(num_train_timesteps=1000)
# Define the optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-4)
# Define training loop
def train_model(num_epochs=5):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.train()
    for epoch in range(num_epochs):
        for batch_idx, (images, _) in enumerate(data_loader):
            images = images.to(device)
            # Forward diffusion (add noise)
            timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (images.size(0),), device=device)
            noise = torch.randn_like(images)
            noisy_images = scheduler.add_noise(images, noise, timesteps)
            # Predict the noise
            predicted_noise = model(noisy_images, timesteps).sample
            # Compute loss (mean squared error)
            loss = nn.MSELoss()(predicted_noise, noise)
            # Backpropagation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print(f"Epoch [{epoch+1}/{num_epochs}], Step [{batch_idx+1}/{len(data_loader)}], Loss: {loss.item():.4f}")
    print("Training complete.")
# Define a test function to visualize results
def test_model():
    import matplotlib.pyplot as plt
    model.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    images, _ = next(iter(data_loader))
    images = images.to(device)
    timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (images.size(0),), device=device)
    noise = torch.randn_like(images)
    noisy_images = scheduler.add_noise(images, noise, timesteps)
    with torch.no_grad():
        denoised_images = model(noisy_images, timesteps).sample
    # Plot original, noisy, and denoised images
    fig, axes = plt.subplots(3, 5, figsize=(10, 6))
    for i in range(5):
        axes[0, i].imshow(images[i].cpu().squeeze(), cmap="gray")
        axes[0, i].set_title("Original")
        axes[1, i].imshow(noisy_images[i].cpu().squeeze(), cmap="gray")
        axes[1, i].set_title("Noisy")
        axes[2, i].imshow(denoised_images[i].cpu().squeeze(), cmap="gray")
        axes[2, i].set_title("Denoised")
    plt.tight_layout()
    plt.show()
# Train and test the model
train_model(num_epochs=5)
test_model()
# Load data set
images = load_batch_data(batch_size=128)
img_shape = images.shape  # (batch size, num channels, w, h)
# Run augmentations like flipping, brightness, contrast, rotation and image size
augmented_images = augment(images)
# Forward
timesteps = torch.randint(0, max_timesteps, (len(images),))
noise = torch.randn(img_shape)
noisy_images = noise_scheduler.add_noise(images, noise, timesteps)
prediction = unet_model(noisy_images, timesteps, return_dict=False)[0]
loss = F.mse_loss(prediction, noise)
# Update .. back
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Inference
target_img_shape = [32, 32, 3]
generator = torch.Generator(device=device).manual_seed(np.random.randint(1, 100))
image = torch.randn(size=(1, *target_img_shape), generator=generator)
for idx, t in enumerate(timesteps):
    model_output = unet_model(image, t)
    image = noise_scheduler.step(model_output, t, image, generator=generator).prev_sample
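The pseudocode above does not show where the inference timesteps come from; with the diffusers DDPMScheduler used earlier, the loop would be preceded by something like this (the 50 denoising steps are an arbitrary choice):

noise_scheduler.set_timesteps(50)        # number of reverse (denoising) steps
timesteps = noise_scheduler.timesteps    # descending tensor of timestep indices, largest first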
ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ""
ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N ""
ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ""
ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ""
/usr/sbin/sshd
# systemctl restart sshd
sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
yum update -y
systemctl set-default graphical.target
setsebool -P xrdp_selinux on
dnf -y group install "xfce" && dnf -y install lightdm
dnf -y install epel-release && dnf config-manager --set-enabled powertools
systemctl enable lightdm
dnf -y install xrdp xorgxrdp
[Xorg]
name=Xorg
lib=libxup.so
username=ask
password=ask
port=-1
code=20
# Frame capture interval (milliseconds)
h264_frame_interval=16
rfx_frame_interval=32
normal_frame_interval=40
param=/usr/libexec/Xorg

#[Xvnc]
#name=Xvnc
#lib=libvnc.so
#username=ask
#password=ask
#ip=127.0.0.1
#port=-1
#xserverbpp=24
#delay_ms=2000
; Disable requested encodings to support buggy VNC servers
; (1 = ExtendedDesktopSize)
#disabled_encodings_mask=0

# Somewhere in the file there is also max_bpp
max_bpp=24

Disabling Wayland:
sed '/^#WaylandEnable/s/^#//g' /etc/gdm/custom.conf
sed -i 's/^#.*WaylandEnable=.*/WaylandEnable=false/' /etc/gdm/custom.conf
Message that you need this one: NVIDIA 390.xx legacy Linux driver:
dnf groupinstall -y "Development Tools"
dnf install -y openssl-devel zlib-devel
#Stop X from starting
service gdm stop
# vi /etc/default/grub
# add ' 3 rd.driver.blacklist=nouveau' to the CMDLINE
grub2-mkconfig -o /boot/grub2/grub.cfg
yum install -y kernel
yum install -y kernel-headers
yum install -y kernel-devel
dnf install kernel-devel
yum -y install epel-release
# fedora
echo -e "blacklist nouveau\noptions nouveau modeset=0\n" > /usr/lib/modprobe.d/blacklist-nouveau.conf
# centos
echo -e "blacklist nouveau\noptions nouveau modeset=0\n" > /etc/modprobe.d/blacklist-nouveau.conf
yum install -y dkms
yum update -y kernel kernel-headers
dracut --force
sync
reboot
echo "/usr/sbin/modprobe" > /proc/sys/kernel/modprobe
VERSION=390.147
wget http://us.download.nvidia.com/XFree86/Linux-x86_64/${VERSION}/NVIDIA-Linux-x86_64-${VERSION}.run
chmod +x NVIDIA-Linux-x86_64-${VERSION}.run
./NVIDIA-Linux-x86_64-${VERSION}.run
Register with DKMS (yes)
nvidia-smi # Test
total duration:       12m32.390262212s
load duration:        23.430151ms
prompt eval count:    32 token(s)
prompt eval duration: 5.835s
prompt eval rate:     5.48 tokens/s
eval count:           1171 token(s)
eval duration:        12m26.53s
eval rate:            1.57 tokens/s