Current File : /home/deltahospital/test.delta-hospital.com/tuned.tar

==> functions <==
#
# This is library of helper functions that can be used in scripts in tuned profiles.
#
# The API provided by this library is under heavy development and may change at any time
#

#
# Config
#
STORAGE=/run/tuned
STORAGE_PERSISTENT=/var/lib/tuned
STORAGE_SUFFIX=".save"

#
# Helpers
#

# Save value
# $0 STORAGE_NAME VALUE
save_value() {
	[ "$#" -ne 2 ] && return
	[ "$2" -a -e "${STORAGE}" ] && echo "$2" > "${STORAGE}/${1}${STORAGE_SUFFIX}"
}

# Parse a sysfs value, e.g. for "val1 [val2] val3" return "val2"
# $0 SYSFS_NAME
parse_sys() {
	local V1 V2
	[ -r "$1" ] || return
	V1=`cat "$1"`
	V2="${V1##*[}"
	V2="${V2%%]*}"
	echo "${V2:-$V1}"
}
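
# Example (illustrative): for a scheduler file containing "noop deadline [cfq]",
#   parse_sys /sys/block/sda/queue/scheduler
# prints the active value, "cfq"; for a file with no bracketed value it prints
# the whole content.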

# Save sysfs value
# $0 STORAGE_NAME SYSFS_NAME
save_sys() {
	[ "$#" -ne 2 ] && return
	[ -r "$2" -a ! -e "${STORAGE}/${1}${STORAGE_SUFFIX}" ] && parse_sys "$2" > "${STORAGE}/${1}${STORAGE_SUFFIX}"
}

# Set sysfs value
# $0 SYSFS_NAME VALUE
set_sys() {
	[ "$#" -ne 2 ] && return
	[ -w "$1" ] && echo "$2" > "$1"
}

# Save and set sysfs value
# $0 STORAGE_NAME SYSFS_NAME VALUE
save_set_sys() {
	[ "$#" -ne 3 ] && return
	save_sys "$1" "$2"
	set_sys "$2" "$3"
}

# Get stored sysfs value from storage
# $0 STORAGE_NAME
get_stored_sys() {
	[ "$#" -ne 1 ] && return
	[ -r "${STORAGE}/${1}${STORAGE_SUFFIX}" ] && cat "${STORAGE}/${1}${STORAGE_SUFFIX}"
}

# Restore value from storage
# $0 STORAGE_NAME
restore_value() {
	[ "$#" -ne 1 ] && return
	_rs_value="`get_stored_sys \"$1\"`"
	unlink "${STORAGE}/${1}${STORAGE_SUFFIX}" >/dev/null 2>&1
	[ "$_rs_value" ] && echo "$_rs_value"
}

# Restore sysfs value from storage, if nothing is stored, use VALUE
# $0 STORAGE_NAME SYSFS_NAME [VALUE]
restore_sys() {
	[ "$#" -lt 2 -o "$#" -gt 3 ] && return
	_rs_value="`get_stored_sys \"$1\"`"
	unlink "${STORAGE}/${1}${STORAGE_SUFFIX}" >/dev/null 2>&1
	[ "$_rs_value" ] || _rs_value="$3"
	[ "$_rs_value" ] && set_sys "$2" "$_rs_value"
}
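
# Illustrative round trip using the helpers above (the sysfs path is just an
# example):
#   save_set_sys thp /sys/kernel/mm/transparent_hugepage/enabled always
#   ...
#   restore_sys thp /sys/kernel/mm/transparent_hugepage/enabled never
# restore_sys writes back the stored value, falling back to "never" (the
# optional VALUE argument) only if nothing was saved.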


#
# DISK tuning
#

DISKS_DEV="$(command ls -d1 /dev/[shv]d*[a-z] 2>/dev/null)"
DISKS_SYS="$(command ls -d1 /sys/block/{sd,cciss,dm-,vd,dasd,xvd}* 2>/dev/null)"

_check_elevator_override()
{
	/bin/fgrep -q 'elevator=' /proc/cmdline
}

# $0 OPERATOR DEVICES ELEVATOR
_set_elevator_helper() {
	_check_elevator_override && return
	SYS_BLOCK_SDX=""
	[ "$2" ] && SYS_BLOCK_SDX=$(eval LANG=C /bin/ls -1 "${2}" 2>/dev/null)

	# if there are no elevator settings on the kernel command line, apply the elevator
	if [ "$1" -a "$SYS_BLOCK_SDX" ]; then
		for i in $SYS_BLOCK_SDX; do
			se_dev="`echo \"$i\" | sed 's|/sys/block/\([^/]\+\)/queue/scheduler|\1|'`"
			$1 "elevator_${se_dev}" "$i" "$3"
		done
	fi
}

# $0 DEVICES ELEVATOR
set_elevator() {
	_set_elevator_helper save_set_sys "$1" "$2"
}

# $0 DEVICES [ELEVATOR]
restore_elevator() {
	re_elevator="$2"
	[ "$re_elevator" ] || re_elevator=cfq
	_set_elevator_helper restore_sys "$1" "$re_elevator"
}
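
# Illustrative usage (the glob is expanded by the helper via ls):
#   set_elevator "/sys/block/sd*/queue/scheduler" deadline
#   restore_elevator "/sys/block/sd*/queue/scheduler"   # saved value, or cfq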

# SATA Aggressive Link Power Management
# usage: set_disk_alpm policy
set_disk_alpm() {
	policy=$1

	for host in /sys/class/scsi_host/*; do
		if [ -f $host/ahci_port_cmd ]; then
			port_cmd=`cat $host/ahci_port_cmd`;
			if [ $((0x$port_cmd & 0x240000)) = 0 -a -f $host/link_power_management_policy ]; then
				echo $policy >$host/link_power_management_policy;
			else
				echo "max_performance" >$host/link_power_management_policy;
			fi
		fi
	done
}

# usage: set_disk_apm level
set_disk_apm() {
	level=$1
	for disk in $DISKS_DEV; do
		hdparm -B $level $disk &>/dev/null
	done
}

# usage: set_disk_spindown level
set_disk_spindown() {
	level=$1
	for disk in $DISKS_DEV; do
		hdparm -S $level $disk &>/dev/null
	done
}

# usage: multiply_disk_readahead by
multiply_disk_readahead() {
	by=$1

	# float multiplication not supported in bash
	# bc might not be installed, python is available for sure

	for disk in $DISKS_SYS; do
		control="${disk}/queue/read_ahead_kb"
		old=$(cat $control)
		new=$(echo "print int($old*$by)" | python)

		(echo $new > $control) &>/dev/null
	done
}
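
# Example (illustrative): `multiply_disk_readahead 2` turns a read_ahead_kb of
# 128 into 256 on every disk found in $DISKS_SYS.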

# usage: remount_partitions options partition1 partition2 ...
remount_partitions() {
	options=$1
	shift

	for partition in $@; do
		mount -o remount,$options $partition >/dev/null 2>&1
	done
}

remount_all_no_rootboot_partitions() {
	[ "$1" ] || return
	# Find non-root and non-boot partitions and remount them with the given options (typically to disable barriers)
	rootvol=$(df -h / | grep "^/dev" | awk '{print $1}')
	bootvol=$(df -h /boot | grep "^/dev" | awk '{print $1}')
	volumes=$(df -hl --exclude=tmpfs | grep "^/dev" | awk '{print $1}')
	nobarriervols=$(echo "$volumes" | grep -v $rootvol | grep -v $bootvol)
	remount_partitions "$1" $nobarriervols
}


DISK_QUANTUM_SAVE="${STORAGE}/disk_quantum${STORAGE_SUFFIX}"

set_disk_scheduler_quantum() {
	value=$1
	rm -f "$DISK_QUANTUM_SAVE"
	for disk in $DISKS_SYS; do
		control="${disk}/queue/iosched/quantum"
		echo "echo $(cat $control) > $control" >> "$DISK_QUANTUM_SAVE" 2>/dev/null
		(echo $value > $control) &>/dev/null
	done
}

restore_disk_scheduler_quantum() {
	if [ -r "$DISK_QUANTUM_SAVE" ]; then
		/bin/bash "$DISK_QUANTUM_SAVE" &>/dev/null
		rm -f "$DISK_QUANTUM_SAVE"
	fi
}
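
# The quantum save file is itself a shell script replayed by bash on restore;
# an illustrative saved line looks like:
#   echo 8 > /sys/block/sda/queue/iosched/quantum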

#
# CPU tuning
#

CPUSPEED_SAVE_FILE="${STORAGE}/cpuspeed${STORAGE_SUFFIX}"
CPUSPEED_ORIG_GOV="${STORAGE}/cpuspeed-governor-%s${STORAGE_SUFFIX}"
CPUSPEED_STARTED="${STORAGE}/cpuspeed-started"
CPUSPEED_CFG="/etc/sysconfig/cpuspeed"
CPUSPEED_INIT="/etc/rc.d/init.d/cpuspeed"
# do not use cpuspeed
CPUSPEED_USE="0"
CPUS="$(ls -d1 /sys/devices/system/cpu/cpu* | sed 's;^.*/;;' |  grep "cpu[0-9]\+")"

# set CPU governor setting and store the old settings
# usage: set_cpu_governor governor
set_cpu_governor() {
	governor=$1

	# always patch the cpuspeed configuration if it exists; if it doesn't exist
	# but its use was requested, print a hint and fall back to sysfs control
	if [ -e $CPUSPEED_INIT ]; then
		if [ ! -e $CPUSPEED_SAVE_FILE -a -e $CPUSPEED_CFG ]; then
			cp -p $CPUSPEED_CFG $CPUSPEED_SAVE_FILE
			sed -e 's/^GOVERNOR=.*/GOVERNOR='$governor'/g' $CPUSPEED_SAVE_FILE > $CPUSPEED_CFG
		fi
	else
		if [ "$CPUSPEED_USE" = "1" ]; then
			echo >&2
			echo "Suggestion: install 'cpuspeed' package to get best tuning results." >&2
			echo "Falling back to sysfs control." >&2
			echo >&2
		fi

		CPUSPEED_USE="0"
	fi

	if [ "$CPUSPEED_USE" = "1" ]; then
		service cpuspeed status &> /dev/null
		[ $? -eq 3 ] && touch $CPUSPEED_STARTED || rm -f $CPUSPEED_STARTED

		service cpuspeed restart &> /dev/null

	# direct change using sysfs
	elif [ -e /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor ]; then

		for cpu in $CPUS; do
			gov_file=/sys/devices/system/cpu/$cpu/cpufreq/scaling_governor
			save_file=$(printf $CPUSPEED_ORIG_GOV $cpu)
			rm -f $save_file
			if [ -e $gov_file ]; then
				cat $gov_file > $save_file
				echo $governor > $gov_file
			fi
		done
	fi
}

# re-enable previous CPU governor settings
# usage: restore_cpu_governor
restore_cpu_governor() {
	if [ -e $CPUSPEED_INIT ]; then
		if [ -e $CPUSPEED_SAVE_FILE ]; then
			cp -fp $CPUSPEED_SAVE_FILE $CPUSPEED_CFG
			rm -f $CPUSPEED_SAVE_FILE
		fi

		if [ "$CPUSPEED_USE" = "1" ]; then
			if [ -e $CPUSPEED_STARTED ]; then
				service cpuspeed stop &> /dev/null
			else
				service cpuspeed restart &> /dev/null
			fi
		fi
		if [ -e $CPUSPEED_STARTED ]; then
			rm -f $CPUSPEED_STARTED
		fi
	else
		CPUSPEED_USE="0"
	fi

	if [ "$CPUSPEED_USE" != "1" -a -e /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor ]; then
		for cpu in $CPUS; do
			cpufreq_dir=/sys/devices/system/cpu/$cpu/cpufreq
			save_file=$(printf $CPUSPEED_ORIG_GOV $cpu)

			if [ -e $cpufreq_dir/scaling_governor ]; then
				if [ -e $save_file ]; then
					cat $save_file > $cpufreq_dir/scaling_governor
					rm -f $save_file
				else
					echo userspace > $cpufreq_dir/scaling_governor
					cat $cpufreq_dir/cpuinfo_max_freq > $cpufreq_dir/scaling_setspeed
				fi
			fi
		done
	fi
}
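
# Illustrative usage:
#   set_cpu_governor powersave    # saves each CPU's governor, then applies
#   restore_cpu_governor          # writes the saved governors back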

_cpu_multicore_powersave() {
	value=$1
	[ -e /sys/devices/system/cpu/sched_mc_power_savings ] && echo $value > /sys/devices/system/cpu/sched_mc_power_savings
}

# enable multi core power savings for low wakeup systems
enable_cpu_multicore_powersave() {
	_cpu_multicore_powersave 1
}

disable_cpu_multicore_powersave() {
	_cpu_multicore_powersave 0
}

#
# MEMORY tuning
#

THP_ENABLE="/sys/kernel/mm/transparent_hugepage/enabled"
THP_SAVE="${STORAGE}/thp${STORAGE_SUFFIX}"

[ -e "$THP_ENABLE" ] || THP_ENABLE="/sys/kernel/mm/redhat_transparent_hugepage/enabled"

enable_transparent_hugepages() {
	if [ -e $THP_ENABLE ]; then
		cut -f2 -d'[' $THP_ENABLE  | cut -f1 -d']' > $THP_SAVE
		(echo always > $THP_ENABLE) &> /dev/null
	fi
}

restore_transparent_hugepages() {
	if [ -e $THP_SAVE ]; then
		(echo $(cat $THP_SAVE) > $THP_ENABLE) &> /dev/null
		rm -f $THP_SAVE
	fi
}
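
# Example (illustrative): if $THP_ENABLE contains "always madvise [never]",
# enable_transparent_hugepages stores "never" in $THP_SAVE and writes "always".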

#
# WIFI tuning
#

# usage: _wifi_set_power_level level
_wifi_set_power_level() {
	# 0    auto, PM enabled
	# 1-5  least savings and lowest latency - most savings and highest latency
	# 6    disable power savings
	level=$1

	# do not report errors on systems with no wireless
	[ -e /proc/net/wireless ] || return 0

	# apply the settings using iwpriv
	ifaces=$(cat /proc/net/wireless | grep -v '|' | sed 's@^ *\([^:]*\):.*@\1@')
	for iface in $ifaces; do
		iwpriv $iface set_power $level
	done

	# some adapters may rely on sysfs
	for i in /sys/bus/pci/devices/*/power_level; do
		(echo $level > $i) &> /dev/null
	done
}

enable_wifi_powersave() {
	_wifi_set_power_level 5
}

disable_wifi_powersave() {
	_wifi_set_power_level 0
}

#
# BLUETOOTH tuning
#

disable_bluetooth() {
	hciconfig hci0 down >/dev/null 2>&1
	lsmod | grep -q btusb && rmmod btusb
}

enable_bluetooth() {
	modprobe btusb
	hciconfig hci0 up >/dev/null 2>&1
}

#
# USB tuning
#

_usb_autosuspend() {
	value=$1
	for i in /sys/bus/usb/devices/*/power/autosuspend; do echo $value > $i; done &> /dev/null
}

enable_usb_autosuspend() {
	_usb_autosuspend 1
}

disable_usb_autosuspend() {
	_usb_autosuspend 0
}

#
# SOUND CARDS tuning
#

enable_snd_ac97_powersave() {
	save_set_sys ac97 /sys/module/snd_ac97_codec/parameters/power_save Y
}

disable_snd_ac97_powersave() {
	save_set_sys ac97 /sys/module/snd_ac97_codec/parameters/power_save N
}

restore_snd_ac97_powersave() {
	restore_sys ac97 /sys/module/snd_ac97_codec/parameters/power_save $1
}

set_hda_intel_powersave() {
	save_set_sys hda_intel /sys/module/snd_hda_intel/parameters/power_save $1
}

restore_hda_intel_powersave() {
	restore_sys hda_intel /sys/module/snd_hda_intel/parameters/power_save $1
}

#
# VIDEO CARDS tuning
#

# Power savings settings for Radeon
# usage: set_radeon_powersave dynpm | default | auto | low | mid | high
set_radeon_powersave () {
	[ "$1" ] || return
	[ -e /sys/class/drm/card0/device/power_method ] || return
	if [ "$1" = default -o "$1" = auto -o "$1" = low -o "$1" = med -o "$1" = high ]; then
		[ -w /sys/class/drm/card0/device/power_profile ] || return
		save_sys radeon_profile /sys/class/drm/card0/device/power_profile
		save_set_sys radeon_method /sys/class/drm/card0/device/power_method profile
		set_sys /sys/class/drm/card0/device/power_profile "$1"
	elif [ "$1" = dynpm ]; then
		save_sys radeon_profile /sys/class/drm/card0/device/power_profile
		save_set_sys radeon_method /sys/class/drm/card0/device/power_method dynpm
	fi
}

restore_radeon_powersave () {
	_rrp_method="`get_stored_sys radeon_method`"
	restore_sys radeon_method /sys/class/drm/card0/device/power_method profile
	[ -z "$_rrp_method" -o "$_rrp_method" = "profile" ] && restore_sys radeon_profile /sys/class/drm/card0/device/power_profile default
}

#
# SOFTWARE tuning
#

RSYSLOG_CFG="/etc/rsyslog.conf"
RSYSLOG_SAVE="${STORAGE}/cpuspeed${STORAGE_SUFFIX}"

disable_logs_syncing() {
	cp -p $RSYSLOG_CFG $RSYSLOG_SAVE
	sed -i 's/ \/var\/log/-\/var\/log/' $RSYSLOG_CFG
}

restore_logs_syncing() {
	mv -Z $RSYSLOG_SAVE $RSYSLOG_CFG || mv $RSYSLOG_SAVE $RSYSLOG_CFG
}

irqbalance_banned_cpus_clear() {
    sed -i '/^IRQBALANCE_BANNED_CPUS=/d' /etc/sysconfig/irqbalance || return
    if [ ${1:-restart} = restart ]; then
        systemctl try-restart irqbalance
    fi
}

irqbalance_banned_cpus_setup() {
    irqbalance_banned_cpus_clear norestart
    if [ -n "$1" ]; then
        echo "IRQBALANCE_BANNED_CPUS=$1" >> /etc/sysconfig/irqbalance
    fi
    systemctl try-restart irqbalance
}
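
# Illustrative usage (the argument is a hexadecimal CPU mask):
#   irqbalance_banned_cpus_setup 00000003   # keep IRQs off CPUs 0 and 1
#   irqbalance_banned_cpus_clear            # drop the ban and restart irqbalance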

#
# HARDWARE SPECIFIC tuning
#

# Asus EEE with Intel Atom
_eee_fsb_control() {
	value=$1
	if [ -e /sys/devices/platform/eeepc/she ]; then
		echo $value > /sys/devices/platform/eeepc/she
	elif [ -e /sys/devices/platform/eeepc/cpufv ]; then
		echo $value > /sys/devices/platform/eeepc/cpufv
	elif [ -e /sys/devices/platform/eeepc-wmi/cpufv ]; then
		echo $value > /sys/devices/platform/eeepc-wmi/cpufv
	fi
}

eee_set_reduced_fsb() {
	_eee_fsb_control 2
}

eee_set_normal_fsb() {
	_eee_fsb_control 1
}

#
# modprobe configuration handling
#

kvm_modprobe_file=/etc/modprobe.d/kvm.rt.tuned.conf

teardown_kvm_mod_low_latency()
{
	rm -f $kvm_modprobe_file
}

setup_kvm_mod_low_latency()
{
	local HAS_KPS=""
	local HAS_NX_HP=""
	local HAS_PLE_GAP=""
	local WANTS_KPS=""
	local WANTS_NX_HP=""
	local WANTS_PLE_GAP=""

	modinfo -p kvm | grep -q kvmclock_periodic_sync && HAS_KPS=1
	modinfo -p kvm | grep -q nx_huge_pages && HAS_NX_HP=1
	modinfo -p kvm_intel | grep -q ple_gap && HAS_PLE_GAP=1
	grep -qs kvmclock_periodic_sync "$kvm_modprobe_file" && WANTS_KPS=1
	grep -qs nx_huge_pages "$kvm_modprobe_file" && WANTS_NX_HP=1
	grep -qs ple_gap "$kvm_modprobe_file" && WANTS_PLE_GAP=1

	if [ "$HAS_KPS" != "$WANTS_KPS" -o "$HAS_PLE_GAP" != "$WANTS_PLE_GAP" -o \
	     "$HAS_NX_HP" != "$WANTS_NX_HP" ]; then
		teardown_kvm_mod_low_latency
		[ "$HAS_KPS" ] && echo "options kvm kvmclock_periodic_sync=0" > $kvm_modprobe_file
		[ "$HAS_NX_HP" ] && echo "options kvm nx_huge_pages=0" >> $kvm_modprobe_file
		[ "$HAS_PLE_GAP" ] && echo "options kvm_intel ple_gap=0" >> $kvm_modprobe_file
	fi
	return 0
}
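
# Illustrative content of $kvm_modprobe_file on a host whose kvm/kvm_intel
# modules expose all three parameters:
#   options kvm kvmclock_periodic_sync=0
#   options kvm nx_huge_pages=0
#   options kvm_intel ple_gap=0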

#
# KSM
#

KSM_SERVICES="ksm ksmtuned"
KSM_RUN_PATH=/sys/kernel/mm/ksm/run
KSM_MASK_FILE="${STORAGE_PERSISTENT}/ksm-masked"

disable_ksm()
{
	if [ ! -f $KSM_MASK_FILE ]; then
		# Always create $KSM_MASK_FILE, since we don't want to
		# run any systemctl commands during boot
		if ! touch $KSM_MASK_FILE; then
			die "failed to create $KSM_MASK_FILE"
		fi
		# Do not run any systemctl commands if $KSM_SERVICES units do not exist
		systemctl cat -- $KSM_SERVICES &> /dev/null || return 0
		systemctl --now --quiet mask $KSM_SERVICES
		# Unmerge all shared pages
		test -f $KSM_RUN_PATH && echo 2 > $KSM_RUN_PATH
	fi
}

# Should only be called when full_rollback == true
enable_ksm()
{
	if [ -f $KSM_MASK_FILE ]; then
		# Do not run any systemctl commands if $KSM_SERVICES units do not exist
		systemctl cat -- $KSM_SERVICES &> /dev/null || return 0
		if systemctl --quiet unmask $KSM_SERVICES; then
			rm -f $KSM_MASK_FILE
		fi
	fi
}

die() {
	echo "$@" >&2
	exit 1
}

#
# ACTION PROCESSING
#

error_not_implemented() {
	echo "tuned: script function '$1' is not implemented." >&2
}

# default actions, used if the profile script does not provide its own:
#
# * start    must be implemented by the profile script
# * stop     must be implemented by the profile script

start() {
	error_not_implemented start
	return 16
}

stop() {
	error_not_implemented stop
	return 16
}

#
# main processing
#

process() {
	ARG="$1"
	shift
	case "$ARG" in
	start)
		start "$@"
		RETVAL=$?
		;;
	stop)
		stop "$@"
		RETVAL=$?
		;;
	verify)
		if declare -f verify &> /dev/null;
		then
			verify "$@"
		else
			:
		fi
		RETVAL=$?
		;;
	*)
		echo $"Usage: $0 {start|stop|verify}"
		RETVAL=2
		;;
	esac

	exit $RETVAL
}
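
# A minimal profile script built on this library would look like the
# illustrative sketch below (see powersave/script.sh for a real one):
#   #!/bin/bash
#   . /usr/lib/tuned/functions
#   start() { set_cpu_governor performance; return 0; }
#   stop()  { restore_cpu_governor; return 0; }
#   process $@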

==> cloudlinux-latency-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimized Cloudlinux hosting Servers
include=throughput-performance

[bootloader]
cmdline = systemd.unified_cgroup_hierarchy=0 systemd.legacy_systemd_cgroup_controller cgroup.memory=nokmem

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100

# Alternating CPU-bound load and disk I/O on a PostgreSQL database server
# can cause the CPU to drop into power-save states.
#
# Explicitly disable deep C-states to reduce latency on OLTP workloads.
force_latency=1

[vm]
transparent_hugepages=never

[sysctl]
kernel.numa_balancing = 1
vm.dirty_ratio = 40
vm.dirty_background_ratio = 10
vm.swappiness=10
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
# Increase kernel buffer size maximums.  Currently this seems only necessary at 40Gb speeds.
#
# The buffer tuning values below do not account for any potential hugepage allocation.
# Ensure that you do not oversubscribe system memory.
#net.ipv4.tcp_rmem="4096 87380 16777216"
#net.ipv4.tcp_wmem="4096 16384 16777216"
##
# Busy polling helps reduce latency in the network receive path
# by allowing socket layer code to poll the receive queue of a
# network device, and disabling network interrupts.
# busy_read value greater than 0 enables busy polling. Recommended
# net.core.busy_read value is 50.
# busy_poll value greater than 0 enables polling globally.
# Recommended net.core.busy_poll value is 50.
net.core.busy_read=50
net.core.busy_poll=50

# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3

####
vm.zone_reclaim_mode=0

[scheduler]
sched_min_granularity_ns = 10000000
sched_wakeup_granularity_ns = 15000000

[disk-vm]
type=disk
devices = vd*
elevator = mq-deadline

[disk-sas]
type=disk
devices = sd*
elevator = mq-deadline

[disk-nvme]
type=disk
devices = nvme*
elevator = none
readahead = 0
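
# Illustrative activation and check (assumes the profile is installed under
# /etc/tuned or /usr/lib/tuned):
#   tuned-adm profile cloudlinux-latency-performance
#   tuned-adm active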

==> balanced/tuned.conf <==
#
# tuned configuration
#

[main]
summary=General non-specialized tuned profile

[modules]
cpufreq_conservative=+r

[cpu]
priority=10
governor=conservative|powersave
energy_perf_bias=normal
energy_performance_preference=balance_performance

[acpi]
platform_profile=balanced

[audio]
timeout=10

[video]
radeon_powersave=dpm-balanced, auto

[disk]
# Comma separated list of devices, all devices if commented out.
# devices=sda

[scsi_host]
alpm=medium_power

==> powersave/script.sh <==
#!/bin/bash

. /usr/lib/tuned/functions

start() {
    [ "$USB_AUTOSUSPEND" = 1 ] && enable_usb_autosuspend
    enable_wifi_powersave
    return 0
}

stop() {
    [ "$USB_AUTOSUSPEND" = 1 ] && disable_usb_autosuspend
    disable_wifi_powersave
    return 0
}

process $@

==> powersave/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for low power consumption

[cpu]
governor=ondemand|powersave
energy_perf_bias=powersave|power
energy_performance_preference=power

[acpi]
platform_profile=low-power|quiet

[eeepc_she]

[vm]

[audio]
timeout=10

[video]
radeon_powersave=dpm-battery, auto

[disk]
# Comma separated list of devices, all devices if commented out.
# devices=sda

[net]
# Comma separated list of devices, all devices if commented out.
# devices=eth0

[scsi_host]
alpm=min_power

[sysctl]
vm.laptop_mode=5
vm.dirty_writeback_centisecs=1500
kernel.nmi_watchdog=0

[script]
script=${i:PROFILE_DIR}/script.sh

==> throughput-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Broadly applicable tuning that provides excellent performance across a variety of common server workloads

[variables]
thunderx_cpuinfo_regex=CPU part\s+:\s+(0x0?516)|(0x0?af)|(0x0?a[0-3])|(0x0?b8)\b
amd_cpuinfo_regex=model name\s+:.*\bAMD\b

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
energy_performance_preference=performance

[acpi]
platform_profile=performance

# Marvell ThunderX
[vm.thunderx]
type=vm
uname_regex=aarch64
cpuinfo_regex=${thunderx_cpuinfo_regex}
transparent_hugepages=never

[disk]
# The default unit for readahead is KiB.  This can be adjusted to sectors
# by specifying the relevant suffix, e.g. (readahead => 8192 s). There must
# be at least one space between the number and suffix (if suffix is specified).
readahead=>4096

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10

# PID allocation wrap value.  When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., a box with 32 cpus, the default of 32768 is reasonable, for 64 cpus,
# 65536, for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

# The kernel default of 128 is over twenty years old; kernel 5.4 increased
# it to 4096, so do not set it lower than 2048 on older kernels
net.core.somaxconn=>2048

[scheduler]
runtime=0
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 10000000

# SCHED_OTHER wake-up granularity.
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
sched_wakeup_granularity_ns = 15000000

# Marvell ThunderX
[sysctl.thunderx]
type=sysctl
uname_regex=aarch64
cpuinfo_regex=${thunderx_cpuinfo_regex}
kernel.numa_balancing=0

# AMD
[scheduler.amd]
type=scheduler
uname_regex=x86_64
cpuinfo_regex=${amd_cpuinfo_regex}
runtime=0
sched_migration_cost_ns=5000000

==> aws/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for aws ec2 instances
include=throughput-performance

# Marvell ThunderX
[bootloader.thunderx]
# rhbz:1836058
type=bootloader
uname_regex=aarch64
cmdline=+iommu.strict=0

[bootloader]
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
# set nvme_core.io_timeout to 4294967295
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena.html
# set net.ifnames to 0
cmdline=+net.ifnames=0 nvme_core.io_timeout=4294967295

==> network-latency/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for deterministic performance at the cost of increased power consumption, focused on low latency network performance
include=latency-performance

[vm]
transparent_hugepages=never

[sysctl]
net.core.busy_read=50
net.core.busy_poll=50
net.ipv4.tcp_fastopen=3
kernel.numa_balancing=0
kernel.hung_task_timeout_secs = 600
kernel.nmi_watchdog = 0
vm.stat_interval = 10
kernel.timer_migration = 0

[bootloader]
cmdline_network_latency=skew_tick=1 tsc=reliable rcupdate.rcu_normal_after_boot=1

[rtentsk]

==> recommend.d/50-tuned.conf <==
# Tuned rules for recommend_profile.
#
# Syntax:
# [PROFILE1]
# KEYWORD11=RE11
# KEYWORD21=RE12
#
# [PROFILE2]
# KEYWORD21=RE21
# KEYWORD22=RE22

# KEYWORD can be:
# virt            - for RE to match output of virt-what
# system          - for RE to match content of /etc/system-release-cpe
# process         - for RE to match running processes. It can have arbitrary
#                   suffix, all process* lines have to match for the PROFILE
#                   to match (i.e. the AND operator)
# /FILE           - for RE to match content of the FILE, e.g.:
#                   '/etc/passwd=.+'. If file doesn't exist, its RE will not
#                   match.
# chassis_type    - for RE to match the chassis type as reported by dmidecode
# syspurpose_role - for RE to match the system role as reported by syspurpose

# All REs for all KEYWORDs have to match for PROFILE to match (i.e. the AND operator).
# If 'virt' or 'system' is not specified, it matches for every string.
# If 'virt' or 'system' is empty, i.e. 'virt=', it matches only empty string (alias for '^$').
# If several profiles matched, the first match is taken.
#
# Limitation:
# Each profile can be specified only once, because there cannot be
# multiple sections in the configuration file with the same name
# (ConfigObj limitation).
# If there is a need to specify the profile multiple times, unique
# suffix like ',ANYSTRING' can be used. Everything after the last ','
# is stripped by the parser, e.g.:
#
# [balanced,1]
# /FILE1=RE1
#
# [balanced,2]
# /FILE2=RE2
#
# This will set 'balanced' profile in case there is FILE1 matching RE1 or
# FILE2 matching RE2 or both.

[atomic-host]
virt=
syspurpose_role=.*atomic.*

[atomic-guest]
virt=.+
syspurpose_role=.*atomic.*

[virtual-guest]
virt=.+

[balanced]
syspurpose_role=(.*(desktop|workstation).*)|^$
chassis_type=.*(Notebook|Laptop|Portable).*

[throughput-performance]

==> latency-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for deterministic performance at the cost of increased power consumption

[cpu]
force_latency=cstate.id_no_zero:1|3
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[acpi]
platform_profile=performance

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio=10

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio=3

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

[scheduler]
runtime=0
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 3000000
sched_wakeup_granularity_ns = 4000000

# The total time the scheduler will consider a migrated process
# "cache hot" and thus less likely to be re-migrated
# (system default is 500000, i.e. 0.5 ms)
sched_migration_cost_ns = 5000000

==> virtual-host/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for running KVM guests
include=throughput-performance

[sysctl]
# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 5

[cpu]
# Setting C3 state sleep mode/power savings
force_latency=cstate.id_no_zero:3|70

[scheduler]
runtime=0
# The total time the scheduler will consider a migrated process
# "cache hot" and thus less likely to be re-migrated
# (system default is 500000, i.e. 0.5 ms)
sched_migration_cost_ns = 5000000

==> accelerator-performance/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Throughput performance based tuning with disabled higher latency STOP states

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100
force_latency=99

[acpi]
platform_profile=performance

[disk]
readahead=>4096

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10

# PID allocation wrap value.  When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., a box with 32 cpus, the default of 32768 is reasonable, for 64 cpus,
# 65536, for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

[scheduler]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
sched_min_granularity_ns = 10000000

# SCHED_OTHER wake-up granularity.
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
sched_wakeup_granularity_ns = 15000000

==> intel-sst/tuned.conf <==
[main]
summary=Configure for Intel Speed Select Base Frequency

[bootloader]
cmdline_intel_sst=-intel_pstate=disable

==> network-throughput/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for streaming network throughput, generally only necessary on older CPUs or 40G+ networks
include=throughput-performance

[sysctl]
# Increase kernel buffer size maximums.  Currently this seems only necessary at 40Gb speeds.
#
# The buffer tuning values below do not account for any potential hugepage allocation.
# Ensure that you do not oversubscribe system memory.
net.ipv4.tcp_rmem="4096 131072 16777216"
net.ipv4.tcp_wmem="4096 16384 16777216"

==> hpc-compute/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for HPC compute workloads
description=Configures virtual memory, CPU governors, and network settings for HPC compute workloads.
include=latency-performance

[vm]
# Most HPC applications can take advantage of hugepages. Force them on.
transparent_hugepages=always

[disk]
# Increase the readahead value to support large, contiguous, files.
readahead=>4096

[sysctl]
# Keep a reasonable amount of memory free to support large mem requests
vm.min_free_kbytes=135168

# Most HPC applications are NUMA aware. Enabling zone reclaim ensures
# memory is reclaimed and reallocated from local pages. Disabling
# automatic NUMA balancing prevents unwanted memory unmapping.
vm.zone_reclaim_mode=1
kernel.numa_balancing=0

# Busy polling helps reduce latency in the network receive path
# by allowing socket layer code to poll the receive queue of a
# network device, and disabling network interrupts.
# busy_read value greater than 0 enables busy polling. Recommended
# net.core.busy_read value is 50.
# busy_poll value greater than 0 enables polling globally.
# Recommended net.core.busy_poll value is 50.
net.core.busy_read=50
net.core.busy_poll=50

# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3



==> cloudlinux-dummy/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Empty CloudLinux tuned profile

==> optimize-serial-console/tuned.conf <==
#
# tuned configuration
#
# This tuned configuration optimizes for serial console performance at the
# expense of reduced debug information to the console.

[main]
summary=Optimize for serial console use.

[sysctl]
kernel.printk="4 4 1 7"

==> virtual-guest/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for running inside a virtual guest
include=throughput-performance

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up.  Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 30

# Filesystem I/O is usually much more efficient than swapping, so try to keep
# swapping low.  It's usually safe to go even lower than this on systems with
# server-grade storage.
vm.swappiness = 30

==> epyc-eda/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for EDA compute workloads on AMD EPYC CPUs
description=Configures virtual memory, CPU governors, and network settings for EDA compute workloads.
include=throughput-performance

# AMD
[scheduler.amd]
type=scheduler
# Allow processes to rapidly move between cores to avoid idle time and maximize CPU usage
sched_migration_cost_ns=10000

==> cloudlinux-default/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimized Cloudlinux hosting Servers
include=throughput-performance

[bootloader]
cmdline = systemd.unified_cgroup_hierarchy=0 systemd.legacy_systemd_cgroup_controller cgroup.memory=nokmem

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[vm]
transparent_hugepages=never

[sysctl]
kernel.numa_balancing = 1

#################
vm.dirty_ratio = 40
vm.dirty_background_ratio = 10
vm.swappiness=10
####
vm.zone_reclaim_mode=0

####################
# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3

[disk-vm]
type=disk
devices = vd*
elevator = mq-deadline

[disk-sas]
type=disk
devices = sd*
elevator = mq-deadline

[disk-nvme]
type=disk
devices = nvme*
elevator = none

==> desktop/tuned.conf <==
#
# tuned configuration
#

[main]
summary=Optimize for the desktop use-case
include=balanced

[sysctl]
kernel.sched_autogroup_enabled=1

==> version.py <==
TUNED_VERSION_MAJOR = 2
TUNED_VERSION_MINOR = 22
TUNED_VERSION_PATCH = 1

TUNED_VERSION_STR = "%d.%d.%d" % (TUNED_VERSION_MAJOR, TUNED_VERSION_MINOR, TUNED_VERSION_PATCH)

==> plugins/plugin_sysctl.py <==
import re
from . import base
from .decorators import *
import tuned.logs
from subprocess import *
from tuned.utils.commands import commands
import tuned.consts as consts
import errno
import os

log = tuned.logs.get()

DEPRECATED_SYSCTL_OPTIONS = [ "base_reachable_time", "retrans_time" ]
SYSCTL_CONFIG_DIRS = [ "/run/sysctl.d",
		"/etc/sysctl.d" ]

class SysctlPlugin(base.Plugin):
	"""
	`sysctl`::
	
	Sets various kernel parameters at runtime.
	+
	This plug-in is used for applying custom `sysctl` settings and should
	only be used to change system settings that are not covered by other
	*TuneD* plug-ins. If the settings are covered by other *TuneD* plug-ins,
	use those plug-ins instead.
	+
	The syntax for this plug-in is
	`_key_=_value_`, where
	`_key_` is the same as the key name provided by the
	`sysctl` utility.
	+
	.Adjusting the kernel runtime kernel.sched_min_granularity_ns value
	====
	----
	[sysctl]
	kernel.sched_min_granularity_ns=3000000
	----
	====
	"""

	def __init__(self, *args, **kwargs):
		super(SysctlPlugin, self).__init__(*args, **kwargs)
		self._has_dynamic_options = True
		self._cmd = commands()

	def _instance_init(self, instance):
		instance._has_dynamic_tuning = False
		instance._has_static_tuning = True

		# FIXME: do we want to do this here?
		# recover original values in case of crash
		storage_key = self._storage_key(instance.name)
		instance._sysctl_original = self._storage.get(storage_key, {})
		if len(instance._sysctl_original) > 0:
			log.info("recovering old sysctl settings from previous run")
			self._instance_unapply_static(instance)
			instance._sysctl_original = {}
			self._storage.unset(storage_key)

		instance._sysctl = instance.options

	def _instance_cleanup(self, instance):
		storage_key = self._storage_key(instance.name)
		self._storage.unset(storage_key)

	def _instance_apply_static(self, instance):
		for option, value in list(instance._sysctl.items()):
			original_value = self._read_sysctl(option)
			if original_value is None:
				log.error("sysctl option %s will not be set, failed to read the original value."
						% option)
			else:
				new_value = self._variables.expand(
						self._cmd.unquote(value))
				new_value = self._process_assignment_modifiers(
						new_value, original_value)
				if new_value is not None:
					instance._sysctl_original[option] = original_value
					self._write_sysctl(option, new_value)

		storage_key = self._storage_key(instance.name)
		self._storage.set(storage_key, instance._sysctl_original)

		if self._global_cfg.get_bool(consts.CFG_REAPPLY_SYSCTL, consts.CFG_DEF_REAPPLY_SYSCTL):
			log.info("reapplying system sysctl")
			self._apply_system_sysctl(instance._sysctl)

	def _instance_verify_static(self, instance, ignore_missing, devices):
		ret = True
		# override, so always skip missing
		ignore_missing = True
		for option, value in list(instance._sysctl.items()):
			curr_val = self._read_sysctl(option)
			value = self._process_assignment_modifiers(self._variables.expand(value), curr_val)
			if value is not None:
				if self._verify_value(option, self._cmd.remove_ws(value), self._cmd.remove_ws(curr_val), ignore_missing) == False:
					ret = False
		return ret

	def _instance_unapply_static(self, instance, rollback = consts.ROLLBACK_SOFT):
		for option, value in list(instance._sysctl_original.items()):
			self._write_sysctl(option, value)

	def _apply_system_sysctl(self, instance_sysctl):
		files = {}
		for d in SYSCTL_CONFIG_DIRS:
			try:
				flist = os.listdir(d)
			except OSError:
				continue
			for fname in flist:
				if not fname.endswith(".conf"):
					continue
				if fname not in files:
					files[fname] = d

		for fname in sorted(files.keys()):
			d = files[fname]
			path = "%s/%s" % (d, fname)
			self._apply_sysctl_config_file(path, instance_sysctl)
		self._apply_sysctl_config_file("/etc/sysctl.conf", instance_sysctl)

	def _apply_sysctl_config_file(self, path, instance_sysctl):
		log.debug("Applying sysctl settings from file %s" % path)
		try:
			with open(path, "r") as f:
				for lineno, line in enumerate(f, 1):
					self._apply_sysctl_config_line(path, lineno, line, instance_sysctl)
			log.debug("Finished applying sysctl settings from file %s"
					% path)
		except (OSError, IOError) as e:
			if e.errno != errno.ENOENT:
				log.error("Error reading sysctl settings from file %s: %s"
						% (path, str(e)))

	def _apply_sysctl_config_line(self, path, lineno, line, instance_sysctl):
		line = line.strip()
		if len(line) == 0 or line[0] == "#" or line[0] == ";":
			return
		tmp = line.split("=", 1)
		if len(tmp) != 2:
			log.error("Syntax error in file %s, line %d"
					% (path, lineno))
			return
		option, value = tmp
		option = option.strip()
		if len(option) == 0:
			log.error("Syntax error in file %s, line %d"
					% (path, lineno))
			return
		value = value.strip()
		if option in instance_sysctl:
			instance_value = self._variables.expand(instance_sysctl[option])
			if instance_value != value:
				log.info("Overriding sysctl parameter '%s' from '%s' to '%s'"
						% (option, instance_value, value))
		self._write_sysctl(option, value, ignore_missing = True)

	def _get_sysctl_path(self, option):
		# The sysctl name differs between the sysctl tool and /proc/sys:
		# dots (.) in the sysctl name become /proc/sys directory separators,
		# while slashes (/) in the name become dots (.) in the /proc/sys
		# filenames.
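		# Example (illustrative): "net.ipv4.conf.eth0/100.rp_filter"
		# becomes "/proc/sys/net/ipv4/conf/eth0.100/rp_filter".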
		return "/proc/sys/%s" % self._cmd.tr(option, "./", "/.")

	def _read_sysctl(self, option):
		path = self._get_sysctl_path(option)
		try:
			with open(path, "r") as f:
				line = ""
				for i, line in enumerate(f):
					if i > 0:
						log.error("Failed to read sysctl parameter '%s', multi-line values are unsupported"
								% option)
						return None
				value = line.strip()
			log.debug("Value of sysctl parameter '%s' is '%s'"
					% (option, value))
			return value
		except (OSError, IOError) as e:
			if e.errno == errno.ENOENT:
				log.error("Failed to read sysctl parameter '%s', the parameter does not exist"
						% option)
			else:
				log.error("Failed to read sysctl parameter '%s': %s"
						% (option, str(e)))
			return None

	def _write_sysctl(self, option, value, ignore_missing = False):
		path = self._get_sysctl_path(option)
		if os.path.basename(path) in DEPRECATED_SYSCTL_OPTIONS:
			log.error("Refusing to set deprecated sysctl option %s"
					% option)
			return False
		try:
			log.debug("Setting sysctl parameter '%s' to '%s'"
					% (option, value))
			with open(path, "w") as f:
				f.write(value)
			return True
		except (OSError, IOError) as e:
			if e.errno == errno.ENOENT:
				log_func = log.debug if ignore_missing else log.error
				log_func("Failed to set sysctl parameter '%s' to '%s', the parameter does not exist"
						% (option, value))
			else:
				log.error("Failed to set sysctl parameter '%s' to '%s': %s"
						% (option, value, str(e)))
			return False

==> plugins/plugin_acpi.py <==
from . import base
from .decorators import *
import os
import errno
import tuned.logs
from tuned.consts import ACPI_DIR

log = tuned.logs.get()


class ACPIPlugin(base.Plugin):
	"""
	`acpi`::

	Configures the ACPI driver.
	+
	The only currently supported option is
	[option]`platform_profile`, which sets the ACPI
	platform profile sysfs attribute,
	a generic power/performance preference API for other drivers.
	Multiple profiles can be specified, separated by `|`.
	The first available profile is selected.
	+
	--
	.Selecting a platform profile
	====
	----
	[acpi]
	platform_profile=balanced|low-power
	----
	Using this option, *TuneD* will try to set the platform profile
	to `balanced`. If that fails, it will try to set it to `low-power`.
	====
	--
	"""
	def __init__(self, *args, **kwargs):
		super(ACPIPlugin, self).__init__(*args, **kwargs)

	@classmethod
	def _get_config_options(cls):
		return {"platform_profile": None}

	def _instance_init(self, instance):
		instance._has_static_tuning = True
		instance._has_dynamic_tuning = False

	def _instance_cleanup(self, instance):
		pass

	@classmethod
	def _platform_profile_choices_path(cls):
		return os.path.join(ACPI_DIR, "platform_profile_choices")

	@classmethod
	def _platform_profile_path(cls):
		return os.path.join(ACPI_DIR, "platform_profile")

	@command_set("platform_profile")
	def _set_platform_profile(self, profiles, sim, remove):
		if not os.path.isfile(self._platform_profile_path()):
			log.debug("ACPI platform_profile is not supported on this system")
			return None
		profiles = [profile.strip() for profile in profiles.split('|')]
		avail_profiles = set(self._cmd.read_file(self._platform_profile_choices_path()).split())
		for profile in profiles:
			if profile in avail_profiles:
				if not sim:
					log.info("Setting platform_profile to '%s'" % profile)
					self._cmd.write_to_file(self._platform_profile_path(), profile, \
						no_error=[errno.ENOENT] if remove else False)
				return profile
			log.warn("Requested platform_profile '%s' unavailable" % profile)
		log.error("Failed to set platform_profile. Is the value in the profile correct?")
		return None

	@command_get("platform_profile")
	def _get_platform_profile(self, ignore_missing=False):
		if not os.path.isfile(self._platform_profile_path()):
			log.debug("ACPI platform_profile is not supported on this system")
			return None
		return self._cmd.read_file(self._platform_profile_path()).strip()

==> plugins/plugin_disk.py <==
import errno
from . import hotplug
from .decorators import *
import tuned.logs
import tuned.consts as consts
from tuned.utils.commands import commands
import os
import re

log = tuned.logs.get()

class DiskPlugin(hotplug.Plugin):
	"""
	`disk`::
	
	Plug-in for tuning various block device options. This plug-in can also
	dynamically change the advanced power management and spindown timeout
	setting for a drive according to the current drive utilization. The
	dynamic tuning is controlled by the [option]`dynamic` and the global
	[option]`dynamic_tuning` option in `tuned-main.conf`.
	+
	The disk plug-in operates on all supported block devices unless a
	comma separated list of [option]`devices` is passed to it.
	+
	.Operate only on the sda block device
	====
	----
	[disk]
	# Comma separated list of devices, all devices if commented out.
	devices=sda
	----
	====
	+
	The [option]`elevator` option sets the Linux I/O scheduler.
	+
	.Use the bfq I/O scheduler on xvda block device
	====
	----
	[disk]
	device=xvda
	elevator=bfq
	----
	====
	+
	The [option]`scheduler_quantum` option only applies to the CFQ I/O
	scheduler. It defines the number of I/O requests that CFQ sends to
	one device at one time, essentially limiting queue depth. The default
	value is 8 requests. The device being used may support greater queue
	depth, but increasing the value of quantum will also increase latency,
	especially for large sequential write work loads.
	+
	The [option]`apm` option sets the Advanced Power Management feature
	on drives that support it. It corresponds to using the `-B` option of
	the `hdparm` utility. The [option]`spindown` option puts the drive
	into idle (low-power) mode, and also sets the standby (spindown)
	timeout for the drive. It corresponds to using `-S` option of the
	`hdparm` utility.
	+
	.Use medium-aggressive power management with spindown
	====
	----
	[disk]
	apm=128
	spindown=6
	----
	====
	+
	The [option]`readahead` option controls how much extra data the
	operating system reads from disk when performing sequential
	I/O operations. Increasing the `readahead` value might improve
	performance in application environments where sequential reading of
	large files takes place. The default unit for readahead is KiB. This
	can be adjusted to sectors by specifying the suffix 's'. If the
	suffix is specified, there must be at least one space between the
	number and suffix (for example, `readahead=8192 s`).
	+
	.Set the `readahead` to 4MB unless already set to a higher value
	====
	----
	[disk]
	readahead=>4096
	----
	====
	The disk readahead value can be multiplied by the constant
	specified by the [option]`readahead_multiply` option.
	"""

	def __init__(self, *args, **kwargs):
		super(DiskPlugin, self).__init__(*args, **kwargs)

		self._power_levels = [254, 225, 195, 165, 145, 125, 105, 85, 70, 55, 30, 20]
		self._spindown_levels = [0, 250, 230, 210, 190, 170, 150, 130, 110, 90, 70, 60]
		self._levels = len(self._power_levels)
		self._level_steps = 6
		self._load_smallest = 0.01
		self._cmd = commands()

	def _init_devices(self):
		super(DiskPlugin, self)._init_devices()
		self._devices_supported = True
		self._use_hdparm = True
		self._free_devices = set()
		self._hdparm_apm_device_support = dict()
		for device in self._hardware_inventory.get_devices("block"):
			if self._device_is_supported(device):
				self._free_devices.add(device.sys_name)
		self._assigned_devices = set()

	def _get_device_objects(self, devices):
		return [self._hardware_inventory.get_device("block", x) for x in devices]

	def _is_hdparm_apm_supported(self, device):
		if not self._use_hdparm:
			return False
		if device in self._hdparm_apm_device_support:
			return self._hdparm_apm_device_support[device]
		(rc, out, err_msg) = self._cmd.execute(["hdparm", "-C", "/dev/%s" % device], \
				no_errors = [errno.ENOENT], return_err=True)
		if rc == -errno.ENOENT:
			log.warn("hdparm command not found, ignoring for other devices")
			self._use_hdparm = False
			return False
		elif rc:
			log.info("Device '%s' not supported by hdparm" % device)
			log.debug("(rc: %s, msg: '%s')" % (rc, err_msg))
			self._hdparm_apm_device_support[device] = False
			return False
		elif "unknown" in out:
			log.info("Driver for device '%s' does not support apm command" % device)
			self._hdparm_apm_device_support[device] = False
			return False
		self._hdparm_apm_device_support[device] = True
		return True

	@classmethod
	def _device_is_supported(cls, device):
		return  device.device_type == "disk" and \
			device.attributes.get("removable", None) == b"0" and \
			(device.parent is None or \
					device.parent.subsystem in ["scsi", "virtio", "xen", "nvme"])

	def _hardware_events_init(self):
		self._hardware_inventory.subscribe(self, "block", self._hardware_events_callback)

	def _hardware_events_cleanup(self):
		self._hardware_inventory.unsubscribe(self)

	def _hardware_events_callback(self, event, device):
		if self._device_is_supported(device) or event == "remove":
			super(DiskPlugin, self)._hardware_events_callback(event, device)

	def _added_device_apply_tuning(self, instance, device_name):
		if instance._load_monitor is not None:
			instance._load_monitor.add_device(device_name)
		super(DiskPlugin, self)._added_device_apply_tuning(instance, device_name)

	def _removed_device_unapply_tuning(self, instance, device_name):
		if instance._load_monitor is not None:
			instance._load_monitor.remove_device(device_name)
		super(DiskPlugin, self)._removed_device_unapply_tuning(instance, device_name)

	@classmethod
	def _get_config_options(cls):
		return {
			"dynamic"            : True, # FIXME: do we want this default?
			"elevator"           : None,
			"apm"                : None,
			"spindown"           : None,
			"readahead"          : None,
			"readahead_multiply" : None,
			"scheduler_quantum"  : None,
		}

	@classmethod
	def _get_config_options_used_by_dynamic(cls):
		return [
			"apm",
			"spindown",
		]

	def _instance_init(self, instance):
		instance._has_static_tuning = True

		self._apm_errcnt = 0
		self._spindown_errcnt = 0

		if self._option_bool(instance.options["dynamic"]):
			instance._has_dynamic_tuning = True
			instance._load_monitor = \
					self._monitors_repository.create(
					"disk", instance.assigned_devices)
			instance._device_idle = {}
			instance._stats = {}
			instance._idle = {}
			instance._spindown_change_delayed = {}
		else:
			instance._has_dynamic_tuning = False
			instance._load_monitor = None

	def _instance_cleanup(self, instance):
		if instance._load_monitor is not None:
			self._monitors_repository.delete(instance._load_monitor)
			instance._load_monitor = None

	def _update_errcnt(self, rc, spindown):
		if spindown:
			s = "spindown"
			cnt = self._spindown_errcnt
		else:
			s = "apm"
			cnt = self._apm_errcnt
		if cnt >= consts.ERROR_THRESHOLD:
			return
		if rc == 0:
			cnt = 0
		elif rc == -errno.ENOENT:
			self._spindown_errcnt = self._apm_errcnt = consts.ERROR_THRESHOLD + 1
			log.warn("hdparm command not found, ignoring future set_apm / set_spindown commands")
			return
		else:
			cnt += 1
			if cnt == consts.ERROR_THRESHOLD:
				log.info("disabling set_%s command: too many consecutive errors" % s)
		if spindown:
			self._spindown_errcnt = cnt
		else:
			self._apm_errcnt = cnt

	def _change_spindown(self, instance, device, new_spindown_level):
		log.debug("changing spindown to %d" % new_spindown_level)
		(rc, out) = self._cmd.execute(["hdparm", "-S%d" % new_spindown_level, "/dev/%s" % device], no_errors = [errno.ENOENT])
		self._update_errcnt(rc, True)
		instance._spindown_change_delayed[device] = False

	def _drive_spinning(self, device):
		(rc, out) = self._cmd.execute(["hdparm", "-C", "/dev/%s" % device], no_errors = [errno.ENOENT])
		return not "standby" in out and not "sleeping" in out

	def _instance_update_dynamic(self, instance, device):
		if not self._is_hdparm_apm_supported(device):
			return
		load = instance._load_monitor.get_device_load(device)
		if load is None:
			return

		if not device in instance._stats:
			self._init_stats_and_idle(instance, device)

		self._update_stats(instance, device, load)
		self._update_idle(instance, device)

		stats = instance._stats[device]
		idle = instance._idle[device]

		# level change decision

		if idle["level"] + 1 < self._levels and idle["read"] >= self._level_steps and idle["write"] >= self._level_steps:
			level_change = 1
		elif idle["level"] > 0 and (idle["read"] == 0 or idle["write"] == 0):
			level_change = -1
		else:
			level_change = 0

		# change level if decided

		if level_change != 0:
			idle["level"] += level_change
			new_power_level = self._power_levels[idle["level"]]
			new_spindown_level = self._spindown_levels[idle["level"]]

			log.debug("tuning level changed to %d" % idle["level"])
			if self._spindown_errcnt < consts.ERROR_THRESHOLD:
				if not self._drive_spinning(device) and level_change > 0:
					log.debug("delaying spindown change to %d, drive has already spun down" % new_spindown_level)
					instance._spindown_change_delayed[device] = True
				else:
					self._change_spindown(instance, device, new_spindown_level)
			if self._apm_errcnt < consts.ERROR_THRESHOLD:
				log.debug("changing APM_level to %d" % new_power_level)
				(rc, out) = self._cmd.execute(["hdparm", "-B%d" % new_power_level, "/dev/%s" % device], no_errors = [errno.ENOENT])
				self._update_errcnt(rc, False)
		elif instance._spindown_change_delayed[device] and self._drive_spinning(device):
			new_spindown_level = self._spindown_levels[idle["level"]]
			self._change_spindown(instance, device, new_spindown_level)

		log.debug("%s load: read %0.2f, write %0.2f" % (device, stats["read"], stats["write"]))
		log.debug("%s idle: read %d, write %d, level %d" % (device, idle["read"], idle["write"], idle["level"]))

	def _init_stats_and_idle(self, instance, device):
		instance._stats[device] = { "new": 11 * [0], "old": 11 * [0], "max": 11 * [1] }
		instance._idle[device] = { "level": 0, "read": 0, "write": 0 }
		instance._spindown_change_delayed[device] = False

	def _update_stats(self, instance, device, new_load):
		instance._stats[device]["old"] = old_load = instance._stats[device]["new"]
		instance._stats[device]["new"] = new_load

		# load difference
		diff = [new_old[0] - new_old[1] for new_old in zip(new_load, old_load)]
		instance._stats[device]["diff"] = diff

		# adapt maximum expected load if the difference is higher
		old_max_load = instance._stats[device]["max"]
		max_load = [max(pair) for pair in zip(old_max_load, diff)]
		instance._stats[device]["max"] = max_load

		# read/write ratio
		instance._stats[device]["read"] =  float(diff[1]) / float(max_load[1])
		instance._stats[device]["write"] = float(diff[5]) / float(max_load[5])

	def _update_idle(self, instance, device):
		# increase counter if there is no load, otherwise reset the counter
		for operation in ["read", "write"]:
			if instance._stats[device][operation] < self._load_smallest:
				instance._idle[device][operation] += 1
			else:
				instance._idle[device][operation] = 0

	def _instance_apply_dynamic(self, instance, device):
		# At the moment we support dynamic tuning only for devices compatible
		# with hdparm APM commands. If functionality not tied to these commands
		# is added in the future, this check will need to change.
		if not self._is_hdparm_apm_supported(device):
			log.info("There is no dynamic tuning available for device '%s' at time" % device)
		else:
			super(DiskPlugin, self)._instance_apply_dynamic(instance, device)

	def _instance_unapply_dynamic(self, instance, device):
		pass

	def _sysfs_path(self, device, suffix, prefix = "/sys/block/"):
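		# The kernel represents device names containing '/' (e.g.
		# "cciss/c0d0") with '!' in sysfs ("/sys/block/cciss!c0d0"),
		# so that form is tried first.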
		if "/" in device:
			dev = os.path.join(prefix, device.replace("/", "!"), suffix)
			if os.path.exists(dev):
				return dev
		return os.path.join(prefix, device, suffix)

	def _elevator_file(self, device):
		return self._sysfs_path(device, "queue/scheduler")

	@command_set("elevator", per_device=True)
	def _set_elevator(self, value, device, sim, remove):
		sys_file = self._elevator_file(device)
		if not sim:
			self._cmd.write_to_file(sys_file, value, \
				no_error = [errno.ENOENT] if remove else False)
		return value

	@command_get("elevator")
	def _get_elevator(self, device, ignore_missing=False):
		sys_file = self._elevator_file(device)
		# example of scheduler file content:
		# noop deadline [cfq]
		return self._cmd.get_active_option(self._cmd.read_file(sys_file, no_error=ignore_missing))

	@command_set("apm", per_device=True)
	def _set_apm(self, value, device, sim, remove):
		if not self._is_hdparm_apm_supported(device):
			if not sim:
				log.info("apm option is not supported for device '%s'" % device)
				return None
			else:
				return str(value)
		if self._apm_errcnt < consts.ERROR_THRESHOLD:
			if not sim:
				(rc, out) = self._cmd.execute(["hdparm", "-B", str(value), "/dev/" + device], no_errors = [errno.ENOENT])
				self._update_errcnt(rc, False)
			return str(value)
		else:
			return None

	@command_get("apm")
	def _get_apm(self, device, ignore_missing=False):
		if not self._is_hdparm_apm_supported(device):
			if not ignore_missing:
				log.info("apm option is not supported for device '%s'" % device)
			return None
		value = None
		err = False
		(rc, out) = self._cmd.execute(["hdparm", "-B", "/dev/" + device], no_errors = [errno.ENOENT])
		if rc == -errno.ENOENT:
			return None
		elif rc != 0:
			err = True
		else:
			m = re.match(r".*=\s*(\d+).*", out, re.S)
			if m:
				try:
					value = int(m.group(1))
				except ValueError:
					err = True
		if err:
			log.error("could not get current APM settings for device '%s'" % device)
		return value

	@command_set("spindown", per_device=True)
	def _set_spindown(self, value, device, sim, remove):
		if not self._is_hdparm_apm_supported(device):
			if not sim:
				log.info("spindown option is not supported for device '%s'" % device)
				return None
			else:
				return str(value)
		if self._spindown_errcnt < consts.ERROR_THRESHOLD:
			if not sim:
				(rc, out) = self._cmd.execute(["hdparm", "-S", str(value), "/dev/" + device], no_errors = [errno.ENOENT])
				self._update_errcnt(rc, True)
			return str(value)
		else:
			return None

	@command_get("spindown")
	def _get_spindown(self, device, ignore_missing=False):
		if not self._is_hdparm_apm_supported(device):
			if not ignore_missing:
				log.info("spindown option is not supported for device '%s'" % device)
			return None
		# There is no way to read the current/old spindown value, hardcode the vendor-specific 253
		return 253

	def _readahead_file(self, device):
		return self._sysfs_path(device, "queue/read_ahead_kb")

	def _parse_ra(self, value):
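		# Values are readahead sizes in KB, optionally given as 512-byte
		# sectors when followed by a unit starting with "s"; e.g.
		# (hypothetical inputs) "128" -> 128 and "256 sectors" -> 128.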
		val = str(value).split(None, 1)
		try:
			v = int(val[0])
		except ValueError:
			return None
		if len(val) > 1 and val[1][0] == "s":
			# sectors are 512 bytes, readahead is in KB: v *= 512 / 1024
			v //= 2
		return v

	@command_set("readahead", per_device=True)
	def _set_readahead(self, value, device, sim, remove):
		sys_file = self._readahead_file(device)
		val = self._parse_ra(value)
		if val is None:
			log.error("Invalid readahead value '%s' for device '%s'" % (value, device))
		else:
			if not sim:
				self._cmd.write_to_file(sys_file, "%d" % val, \
					no_error = [errno.ENOENT] if remove else False)
		return val

	@command_get("readahead")
	def _get_readahead(self, device, ignore_missing=False):
		sys_file = self._readahead_file(device)
		value = self._cmd.read_file(sys_file, no_error=ignore_missing).strip()
		if len(value) == 0:
			return None
		return int(value)

	@command_custom("readahead_multiply", per_device=True)
	def _multiply_readahead(self, enabling, multiplier, device, verify, ignore_missing):
		if verify:
			return None
		storage_key = self._storage_key(
				command_name = "readahead_multiply",
				device_name = device)
		if enabling:
			old_readahead = self._get_readahead(device)
			if old_readahead is None:
				return
			new_readahead = int(float(multiplier) * old_readahead)
			self._storage.set(storage_key, old_readahead)
			self._set_readahead(new_readahead, device, False)
		else:
			old_readahead = self._storage.get(storage_key)
			if old_readahead is None:
				return
			self._set_readahead(old_readahead, device, False)
			self._storage.unset(storage_key)

	def _scheduler_quantum_file(self, device):
		return self._sysfs_path(device, "queue/iosched/quantum")

	@command_set("scheduler_quantum", per_device=True)
	def _set_scheduler_quantum(self, value, device, sim, remove):
		sys_file = self._scheduler_quantum_file(device)
		if not sim:
			self._cmd.write_to_file(sys_file, "%d" % int(value), \
				no_error = [errno.ENOENT] if remove else False)
		return value

	@command_get("scheduler_quantum")
	def _get_scheduler_quantum(self, device, ignore_missing=False):
		sys_file = self._scheduler_quantum_file(device)
		value = self._cmd.read_file(sys_file, no_error=ignore_missing).strip()
		if len(value) == 0:
			if not ignore_missing:
				log.info("disk_scheduler_quantum option is not supported for device '%s'" % device)
			return None
		return int(value)
plugins/plugin_sysfs.py
from . import base
import glob
import re
import os.path
from .decorators import *
import tuned.logs
import tuned.consts as consts
from subprocess import *
from tuned.utils.commands import commands

log = tuned.logs.get()

class SysfsPlugin(base.Plugin):
	"""
	`sysfs`::
	
	Sets various `sysfs` settings specified by the plug-in options.
	+
	The syntax is `_name_=_value_`, where
	`_name_` is the `sysfs` path to use and `_value_` is
	the value to write. The `sysfs` path supports the shell-style
	wildcard characters (see `man 7 glob` for additional detail).
	+
	Use this plug-in when you need to change settings that are
	not covered by other plug-ins. Prefer specific plug-ins if they
	cover the required settings.
	+
	.Ignore corrected errors and associated scans that cause latency spikes
	====
	----
	[sysfs]
	/sys/devices/system/machinecheck/machinecheck*/ignore_ce=1
	----
	====
	"""

	# TODO: resolve possible conflicts with sysctl settings from other plugins

	def __init__(self, *args, **kwargs):
		super(SysfsPlugin, self).__init__(*args, **kwargs)
		self._has_dynamic_options = True
		self._cmd = commands()

	def _instance_init(self, instance):
		instance._has_dynamic_tuning = False
		instance._has_static_tuning = True

		instance._sysfs = {os.path.normpath(key): value for key, value in instance.options.items()}
		instance._sysfs_original = {}

	def _instance_cleanup(self, instance):
		pass

	def _instance_apply_static(self, instance):
		for key, value in list(instance._sysfs.items()):
			v = self._variables.expand(value)
			for f in glob.iglob(key):
				if self._check_sysfs(f):
					instance._sysfs_original[f] = self._read_sysfs(f)
					self._write_sysfs(f, v)
				else:
					log.error("rejecting write to '%s' (not inside /sys)" % f)

	def _instance_verify_static(self, instance, ignore_missing, devices):
		ret = True
		for key, value in list(instance._sysfs.items()):
			v = self._variables.expand(value)
			for f in glob.iglob(key):
				if self._check_sysfs(f):
					curr_val = self._read_sysfs(f)
					if self._verify_value(f, v, curr_val, ignore_missing) == False:
						ret = False
		return ret

	def _instance_unapply_static(self, instance, rollback = consts.ROLLBACK_SOFT):
		for key, value in list(instance._sysfs_original.items()):
			self._write_sysfs(key, value)

	def _check_sysfs(self, sysfs_file):
		return re.match(r"^/sys/.*", sysfs_file)

	def _read_sysfs(self, sysfs_file):
		data = self._cmd.read_file(sysfs_file).strip()
		if len(data) > 0:
			return self._cmd.get_active_option(data, False)
		else:
			return None

	def _write_sysfs(self, sysfs_file, value):
		return self._cmd.write_to_file(sysfs_file, value)
plugins/plugin_bootloader.py
from . import base
from .decorators import *
import tuned.logs
from . import exceptions
from tuned.utils.commands import commands
import tuned.consts as consts

import os
import re
import tempfile
from time import sleep

log = tuned.logs.get()

class BootloaderPlugin(base.Plugin):
	"""
	`bootloader`::
	
	Adds options to the kernel command line. This plug-in supports the
	GRUB 2 boot loader and the Boot Loader Specification (BLS).
	+
	NOTE: *TuneD* will not remove or replace kernel command line
	parameters added via other methods like *grubby*. *TuneD* will manage
	the kernel command line parameters added via *TuneD*. Please refer
	to your platform bootloader documentation about how to identify and
	manage kernel command line parameters set outside of *TuneD*.
	+
	Customized non-standard location of the GRUB 2 configuration file
	can be specified by the [option]`grub2_cfg_file` option.
	+
	The kernel options are added to the current GRUB configuration and
	its templates. Reboot the system for the kernel option to take effect.
	+
	Switching to another profile or manually stopping the `tuned`
	service removes the additional options. If you shut down or reboot
	the system, the kernel options persist in the [filename]`grub.cfg`
	file and grub environment files.
	+
	The kernel options can be specified by the following syntax:
	+
	[subs="+quotes,+macros"]
	----
	cmdline__suffix__=__arg1__ __arg2__ ... __argN__
	----
	+
	Or with an alternative, but equivalent syntax:
	+
	[subs="+quotes,+macros"]
	----
	cmdline__suffix__=+__arg1__ __arg2__ ... __argN__
	----
	+
	Where __suffix__ can be an arbitrary (even empty) alphanumeric
	string which should be unique across all loaded profiles. It is
	recommended to use the profile name as the __suffix__
	(for example, [option]`cmdline_my_profile`). If there are multiple
	[option]`cmdline` options with the same suffix, during the profile
	load/merge the value which was assigned previously will be used. This
	is the same behavior as any other plug-in options. The final kernel
	command line is constructed by concatenating all the resulting
	[option]`cmdline` options.
	+
	It is also possible to remove kernel options by the following syntax:
	+
	[subs="+quotes,+macros"]
	----
	cmdline__suffix__=-__arg1__ __arg2__ ... __argN__
	----
	+
	Such kernel options will not be concatenated and thus removed during
	the final kernel command line construction.
	+
	.Modifying the kernel command line
	====
	For example, to add the [option]`quiet` kernel option to a *TuneD*
	profile, include the following lines in the [filename]`tuned.conf`
	file:
	
	----
	[bootloader]
	cmdline_my_profile=+quiet
	----
	
	An example of a custom profile `my_profile` that adds the
	[option]`isolcpus=2` option to the kernel command line:
	
	----
	[bootloader]
	cmdline_my_profile=isolcpus=2
	----
	
	An example of a custom profile `my_profile` that removes the
	[option]`rhgb quiet` options from the kernel command line (if
	previously added by *TuneD*):
	
	----
	[bootloader]
	cmdline_my_profile=-rhgb quiet
	----
	====
	+
	.Modifying the kernel command line, example with inheritance
	====
	For example, to add the [option]`rhgb quiet` kernel options to a
	*TuneD* profile `profile_1`:
	
	----
	[bootloader]
	cmdline_profile_1=+rhgb quiet
	----
	
	In the child profile `profile_2` drop the [option]`quiet` option
	from the kernel command line:
	
	----
	[main]
	include=profile_1
	
	[bootloader]
	cmdline_profile_2=-quiet
	----
	
	The final kernel command line will be [option]`rhgb`. In case the same
	[option]`cmdline` suffix as in the `profile_1` is used:
	
	----
	[main]
	include=profile_1
	
	[bootloader]
	cmdline_profile_1=-quiet
	----
	
	It will result in the empty kernel command line because the merge
	executes and the [option]`cmdline_profile_1` gets redefined to just
	[option]`-quiet`. Thus there is nothing to remove in the final kernel
	command line processing.
	====
	+
	The [option]`initrd_add_img=IMAGE` adds an initrd overlay file
	`IMAGE`. If the `IMAGE` file name begins with '/', the absolute path is
	used. Otherwise, the current profile directory is used as the base
	directory for the `IMAGE`.
	+
	The [option]`initrd_add_dir=DIR` creates an initrd image from the
	directory `DIR` and adds the resulting image as an overlay.
	If the `DIR` directory name begins with '/', the absolute path
	is used. Otherwise, the current profile directory is used as the
	base directory for the `DIR`.
	+
	The [option]`initrd_dst_img=PATHNAME` sets the name and location of
	the resulting initrd image. Typically, it is not necessary to use this
	option. By default, the location of initrd images is `/boot` and the
	name of the image is taken as the basename of `IMAGE` or `DIR`. This can
	be overridden by setting [option]`initrd_dst_img`.
	+
	The [option]`initrd_remove_dir=VALUE` removes the source directory
	from which the initrd image was built if `VALUE` is true. Only 'y',
	'yes', 't', 'true' and '1' (case insensitive) are accepted as true
	values for this option. Other values are interpreted as false.
	+
	.Adding an overlay initrd image
	====
	----
	[bootloader]
	initrd_remove_dir=True
	initrd_add_dir=/tmp/tuned-initrd.img
	----
	
	This creates an initrd image from the `/tmp/tuned-initrd.img` directory
	and then removes that directory from `/tmp`.
	====
	+
	The [option]`skip_grub_config=VALUE` does not change grub
	configuration if `VALUE` is true. However, [option]`cmdline`
	options are still processed, and the result is used to verify the current
	cmdline. Only 'y', 'yes', 't', 'true' and '1' (case insensitive) are accepted
	as true values for this option. Other values are interpreted as false.
	+
	.Do not change grub configuration
	====
	----
	[bootloader]
	skip_grub_config=True
	cmdline=+systemd.cpu_affinity=1
	----
	====
	"""

	def __init__(self, *args, **kwargs):
		if not os.path.isfile(consts.GRUB2_TUNED_TEMPLATE_PATH):
			raise exceptions.NotSupportedPluginException("Required GRUB2 template not found, disabling plugin.")
		super(BootloaderPlugin, self).__init__(*args, **kwargs)
		self._cmd = commands()

	def _instance_init(self, instance):
		instance._has_dynamic_tuning = False
		instance._has_static_tuning = True
		# controls grub2_cfg rewrites in _instance_post_static
		self.update_grub2_cfg = False
		self._skip_grub_config_val = False
		self._initrd_remove_dir = False
		self._initrd_dst_img_val = None
		self._cmdline_val = ""
		self._initrd_val = ""
		self._grub2_cfg_file_names = self._get_grub2_cfg_files()
		self._bls = self._bls_enabled()

		self._rpm_ostree = self._rpm_ostree_status() is not None

	def _instance_cleanup(self, instance):
		pass

	@classmethod
	def _get_config_options(cls):
		return {
			"grub2_cfg_file": None,
			"initrd_dst_img": None,
			"initrd_add_img": None,
			"initrd_add_dir": None,
			"initrd_remove_dir": None,
			"cmdline": None,
			"skip_grub_config": None,
		}

	@staticmethod
	def _options_to_dict(options, omit=""):
		"""
		Returns dict created from options
		e.g.: _options_to_dict("A=A A=B A B=A C=A", "A=B B=A B=B") returns {'A': ['A', None], 'C': ['A']}
		"""
		d = {}
		omit = omit.split()
		for o in options.split():
			if o not in omit:
				arr = o.split('=', 1)
				d.setdefault(arr[0], []).append(arr[1] if len(arr) > 1 else None)
		return d

	@staticmethod
	def _dict_to_options(d):
		return " ".join([k + "=" + v1 if v1 is not None else k for k, v in d.items() for v1 in v])

	def _rpm_ostree_status(self):
		"""
		Returns status of rpm-ostree transactions or None if not run on rpm-ostree system
		"""
		(rc, out, err) = self._cmd.execute(['rpm-ostree', 'status'], return_err=True)
		log.debug("rpm-ostree status output stdout:\n%s\nstderr:\n%s" % (out, err))
		if rc != 0:
			return None
		split_out = out.split()
		if len(split_out) < 2 or split_out[0] != "State:":
			log.warn("Unexpected format of rpm-ostree status output:\n%s" % out)
			return None
		return split_out[1]

	def _wait_till_idle(self):
		sleep_cycles = 10
		sleep_secs = 1.0
		for i in range(sleep_cycles):
			if self._rpm_ostree_status() == "idle":
				return True
			sleep(sleep_secs)
		if self._rpm_ostree_status() == "idle":
			return True
		return False

	def _rpm_ostree_kargs(self, append={}, delete={}):
		"""
		Method for appending or deleting rpm-ostree karg
		returns None if rpm-ostree not present or is run on not ostree system
		or tuple with new kargs, appended kargs and deleted kargs
		"""
		(rc, out, err) = self._cmd.execute(['rpm-ostree', 'kargs'], return_err=True)
		log.debug("rpm-ostree output stdout:\n%s\nstderr:\n%s" % (out, err))
		if rc != 0:
			return None, None, None
		kargs = self._options_to_dict(out)

		if not self._wait_till_idle():
			log.error("Cannot wait for transaction end")
			return None, None, None

		deleted = {}
		delete_params = self._dict_to_options(delete).split()
		# Deleting kargs, e.g. deleting added kargs by profile
		for k, val in delete.items():
			for v in val:
				kargs[k].remove(v)
			deleted[k] = val

		appended = {}
		append_params = self._dict_to_options(append).split()
		# Appending kargs, e.g. new kargs by profile or restoring kargs replaced by profile
		for k, val in append.items():
			if kargs.get(k):
				# If there is karg that we add with new value we want to delete it
				# and store old value for restoring after profile unload
				log.debug("adding rpm-ostree kargs %s: %s for delete" % (k, kargs[k]))
				deleted.setdefault(k, []).extend(kargs[k])
				delete_params.extend([k + "=" + v if v is not None else k for v in kargs[k]])
				kargs[k] = []
			kargs.setdefault(k, []).extend(val)
			appended[k] = val

		if append_params == delete_params:
			log.info("skipping rpm-ostree kargs - append == deleting (%s)" % append_params)
			return kargs, appended, deleted

		log.info("rpm-ostree kargs - appending: '%s'; deleting: '%s'" % (append_params, delete_params))
		(rc, _, err) = self._cmd.execute(['rpm-ostree', 'kargs'] +
										 ['--append=%s' % v for v in append_params] +
										 ['--delete=%s' % v for v in delete_params], return_err=True)
		if rc != 0:
			log.error("Something went wrong with rpm-ostree kargs\n%s" % (err))
			return self._options_to_dict(out), None, None
		else:
			return kargs, appended, deleted

	def _get_effective_options(self, options):
		"""Merge provided options with plugin default options and merge all cmdline.* options."""
		effective = self._get_config_options().copy()
		cmdline_keys = []
		for key in options:
			if str(key).startswith("cmdline"):
				cmdline_keys.append(key)
			elif key in effective:
				effective[key] = options[key]
			else:
				log.warn("Unknown option '%s' for plugin '%s'." % (key, self.__class__.__name__))
		cmdline = ""
		for key in cmdline_keys:
			val = options[key]
			if val is None or val == "":
				continue
			op = val[0]
			op1 = val[1:2]
			vals = val[1:].strip()
			if op == "+" or (op == "\\" and op1 in ["\\", "+", "-"]):
				if vals != "":
					cmdline += " " + vals
			elif op == "-":
				if vals != "":
					for p in vals.split():
						regex = re.escape(p)
						cmdline = re.sub(r"(\A|\s)" + regex + r"(?=\Z|\s)", r"", cmdline)
			else:
				cmdline += " " + val
		cmdline = cmdline.strip()
		if cmdline != "":
			effective["cmdline"] = cmdline
		return effective

	def _get_grub2_cfg_files(self):
		cfg_files = []
		for f in consts.GRUB2_CFG_FILES:
			if os.path.exists(f):
				cfg_files.append(f)
		return cfg_files

	def _bls_enabled(self):
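		# Checks whether GRUB_ENABLE_BLSCFG=true (case-insensitive,
		# optional quoting and whitespace) is set in the GRUB default
		# environment file.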
		grub2_default_env = self._cmd.read_file(consts.GRUB2_DEFAULT_ENV_FILE, no_error = True)
		if len(grub2_default_env) <= 0:
			log.info("cannot read '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
			return False

		return re.search(r"^\s*GRUB_ENABLE_BLSCFG\s*=\s*\"?\s*[tT][rR][uU][eE]\s*\"?\s*$", grub2_default_env,
			flags = re.MULTILINE) is not None

	def _patch_bootcmdline(self, d):
		return self._cmd.add_modify_option_in_file(consts.BOOT_CMDLINE_FILE, d)

	def _remove_grub2_tuning(self):
		self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : "", consts.BOOT_CMDLINE_INITRD_ADD_VAR : ""})
		if not self._grub2_cfg_file_names:
			log.info("cannot find grub.cfg to patch")
			return
		for f in self._grub2_cfg_file_names:
			self._cmd.add_modify_option_in_file(f, {r"set\s+" + consts.GRUB2_TUNED_VAR : "", r"set\s+" + consts.GRUB2_TUNED_INITRD_VAR : ""}, add = False)
		if self._initrd_dst_img_val is not None:
			log.info("removing initrd image '%s'" % self._initrd_dst_img_val)
			self._cmd.unlink(self._initrd_dst_img_val)

	def _get_rpm_ostree_changes(self):
		f = self._cmd.read_file(consts.BOOT_CMDLINE_FILE)
		appended = re.search(consts.BOOT_CMDLINE_TUNED_VAR + r"=\"(.*)\"", f, flags=re.MULTILINE)
		appended = appended[1] if appended else ""
		deleted = re.search(consts.BOOT_CMDLINE_KARGS_DELETED_VAR + r"=\"(.*)\"", f, flags=re.MULTILINE)
		deleted = deleted[1] if deleted else ""
		return appended, deleted

	def _remove_rpm_ostree_tuning(self):
		appended, deleted = self._get_rpm_ostree_changes()
		self._rpm_ostree_kargs(append=self._options_to_dict(deleted), delete=self._options_to_dict(appended))
		self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR: "", consts.BOOT_CMDLINE_KARGS_DELETED_VAR: ""})

	def _instance_unapply_static(self, instance, rollback = consts.ROLLBACK_SOFT):
		if rollback == consts.ROLLBACK_FULL and not self._skip_grub_config_val:
			if self._rpm_ostree:
				log.info("removing rpm-ostree tuning previously added by Tuned")
				self._remove_rpm_ostree_tuning()
			else:
				log.info("removing grub2 tuning previously added by Tuned")
				self._remove_grub2_tuning()
				self._update_grubenv({"tuned_params" : "", "tuned_initrd" : ""})

	def _grub2_cfg_unpatch(self, grub2_cfg):
		log.debug("unpatching grub.cfg")
		cfg = re.sub(r"^\s*set\s+" + consts.GRUB2_TUNED_VAR + "\\s*=.*\n", "", grub2_cfg, flags = re.MULTILINE)
		grub2_cfg = re.sub(r" *\$" + consts.GRUB2_TUNED_VAR, "", cfg, flags = re.MULTILINE)
		cfg = re.sub(r"^\s*set\s+" + consts.GRUB2_TUNED_INITRD_VAR + "\\s*=.*\n", "", grub2_cfg, flags = re.MULTILINE)
		grub2_cfg = re.sub(r" *\$" + consts.GRUB2_TUNED_INITRD_VAR, "", cfg, flags = re.MULTILINE)
		cfg = re.sub(consts.GRUB2_TEMPLATE_HEADER_BEGIN + r"\n", "", grub2_cfg, flags = re.MULTILINE)
		return re.sub(consts.GRUB2_TEMPLATE_HEADER_END + r"\n+", "", cfg, flags = re.MULTILINE)

	def _grub2_cfg_patch_initial(self, grub2_cfg, d):
		log.debug("initial patching of grub.cfg")
		s = r"\1\n\n" + consts.GRUB2_TEMPLATE_HEADER_BEGIN + "\n"
		for opt in d:
			s += r"set " + self._cmd.escape(opt) + "=\"" + self._cmd.escape(d[opt]) + "\"\n"
		s += consts.GRUB2_TEMPLATE_HEADER_END + r"\n"
		grub2_cfg = re.sub(r"^(\s*###\s+END\s+[^#]+/00_header\s+### *)\n", s, grub2_cfg, flags = re.MULTILINE)

		d2 = {"linux" : consts.GRUB2_TUNED_VAR, "initrd" : consts.GRUB2_TUNED_INITRD_VAR}
		for i in d2:
			# add TuneD parameters to all kernels
			grub2_cfg = re.sub(r"^(\s*" + i + r"(16|efi)?\s+.*)$", r"\1 $" + d2[i], grub2_cfg, flags = re.MULTILINE)
			# remove TuneD parameters from rescue kernels
			grub2_cfg = re.sub(r"^(\s*" + i + r"(?:16|efi)?\s+\S+rescue.*)\$" + d2[i] + r" *(.*)$", r"\1\2", grub2_cfg, flags = re.MULTILINE)
			# fix whitespaces in rescue kernels
			grub2_cfg = re.sub(r"^(\s*" + i + r"(?:16|efi)?\s+\S+rescue.*) +$", r"\1", grub2_cfg, flags = re.MULTILINE)
		return grub2_cfg

	def _grub2_default_env_patch(self):
		grub2_default_env = self._cmd.read_file(consts.GRUB2_DEFAULT_ENV_FILE)
		if len(grub2_default_env) <= 0:
			log.info("cannot read '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
			return False

		d = {"GRUB_CMDLINE_LINUX_DEFAULT" : consts.GRUB2_TUNED_VAR, "GRUB_INITRD_OVERLAY" : consts.GRUB2_TUNED_INITRD_VAR}
		write = False
		for i in d:
			if re.search(r"^[^#]*\b" + i + r"\s*=.*\\\$" + d[i] + r"\b.*$", grub2_default_env, flags = re.MULTILINE) is None:
				write = True
				if grub2_default_env[-1] != "\n":
					grub2_default_env += "\n"
				grub2_default_env += i + "=\"${" + i + ":+$" + i + r" }\$" + d[i] + "\"\n"
		if write:
			log.debug("patching '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
			self._cmd.write_to_file(consts.GRUB2_DEFAULT_ENV_FILE, grub2_default_env)
		return True

	def _grub2_default_env_unpatch(self):
		grub2_default_env = self._cmd.read_file(consts.GRUB2_DEFAULT_ENV_FILE)
		if len(grub2_default_env) <= 0:
			log.info("cannot read '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
			return False

		write = False
		if re.search(r"^GRUB_CMDLINE_LINUX_DEFAULT=\"\$\{GRUB_CMDLINE_LINUX_DEFAULT:\+\$GRUB_CMDLINE_LINUX_DEFAULT \}\\\$" +
			consts.GRUB2_TUNED_VAR + "\"$", grub2_default_env, flags = re.MULTILINE):
				write = True
				cfg = re.sub(r"^GRUB_CMDLINE_LINUX_DEFAULT=\"\$\{GRUB_CMDLINE_LINUX_DEFAULT:\+\$GRUB_CMDLINE_LINUX_DEFAULT \}\\\$" +
					consts.GRUB2_TUNED_VAR + "\"$\n", "", grub2_default_env, flags = re.MULTILINE)
				if cfg[-1] != "\n":
					cfg += "\n"
		if write:
			log.debug("unpatching '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
			self._cmd.write_to_file(consts.GRUB2_DEFAULT_ENV_FILE, cfg)
		return True

	def _grub2_cfg_patch(self, d):
		log.debug("patching grub.cfg")
		if not self._grub2_cfg_file_names:
			log.info("cannot find grub.cfg to patch")
			return False
		for f in self._grub2_cfg_file_names:
			grub2_cfg = self._cmd.read_file(f)
			if len(grub2_cfg) <= 0:
				log.info("cannot patch %s" % f)
				continue
			log.debug("adding boot command line parameters to '%s'" % f)
			grub2_cfg_new = grub2_cfg
			patch_initial = False
			for opt in d:
				(grub2_cfg_new, nsubs) = re.subn(r"\b(set\s+" + opt + r"\s*=).*$", r"\1" + "\"" + self._cmd.escape(d[opt]) + "\"", grub2_cfg_new, flags = re.MULTILINE)
				if nsubs < 1 or re.search(r"\$" + opt, grub2_cfg, flags = re.MULTILINE) is None:
					patch_initial = True

			# workaround for rhbz#1442117
			if len(re.findall(r"\$" + consts.GRUB2_TUNED_VAR, grub2_cfg, flags = re.MULTILINE)) != \
				len(re.findall(r"\$" + consts.GRUB2_TUNED_INITRD_VAR, grub2_cfg, flags = re.MULTILINE)):
					patch_initial = True

			if patch_initial:
				grub2_cfg_new = self._grub2_cfg_patch_initial(self._grub2_cfg_unpatch(grub2_cfg), d)
			self._cmd.write_to_file(f, grub2_cfg_new)
		if self._bls:
			self._grub2_default_env_unpatch()
		else:
			self._grub2_default_env_patch()
		return True

	def _rpm_ostree_update(self):
		appended, _ = self._get_rpm_ostree_changes()
		_cmdline_dict = self._options_to_dict(self._cmdline_val, appended)
		if not _cmdline_dict:
			return None
		(_, _, d) = self._rpm_ostree_kargs(append=_cmdline_dict)
		if d is None:
			return
		self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : self._cmdline_val, consts.BOOT_CMDLINE_KARGS_DELETED_VAR : self._dict_to_options(d)})

	def _grub2_update(self):
		self._grub2_cfg_patch({consts.GRUB2_TUNED_VAR : self._cmdline_val, consts.GRUB2_TUNED_INITRD_VAR : self._initrd_val})
		self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : self._cmdline_val, consts.BOOT_CMDLINE_INITRD_ADD_VAR : self._initrd_val})

	def _has_bls(self):
		return os.path.exists(consts.BLS_ENTRIES_PATH)

	def _update_grubenv(self, d):
		log.debug("updating grubenv, setting %s" % str(d))
		l = ["%s=%s" % (str(option), str(value)) for option, value in d.items()]
		(rc, out) = self._cmd.execute(["grub2-editenv", "-", "set"] + l)
		if rc != 0:
			log.warn("cannot update grubenv: '%s'" % out)
			return False
		return True

	def _bls_entries_patch_initial(self):
		machine_id = self._cmd.get_machine_id()
		if machine_id == "":
			return False
		log.debug("running kernel update hook '%s' to patch BLS entries" % consts.KERNEL_UPDATE_HOOK_FILE)
		(rc, out) = self._cmd.execute([consts.KERNEL_UPDATE_HOOK_FILE, "add"], env = {"KERNEL_INSTALL_MACHINE_ID" : machine_id})
		if rc != 0:
			log.warn("cannot patch BLS entries: '%s'" % out)
			return False
		return True

	def _bls_update(self):
		log.debug("updating BLS")
		if self._has_bls() and \
			self._update_grubenv({"tuned_params" : self._cmdline_val, "tuned_initrd" : self._initrd_val}) and \
			self._bls_entries_patch_initial():
				return True
		return False

	def _init_initrd_dst_img(self, name):
		if self._initrd_dst_img_val is None:
			self._initrd_dst_img_val = os.path.join(consts.BOOT_DIR, os.path.basename(name))

	def _check_petitboot(self):
		return os.path.isdir(consts.PETITBOOT_DETECT_DIR)

	def _install_initrd(self, img):
		if self._rpm_ostree:
			log.warn("Detected rpm-ostree which doesn't support initrd overlays.")
			return False
		if self._check_petitboot():
			log.warn("Detected Petitboot which doesn't support initrd overlays. The initrd overlay will be ignored by bootloader.")
		log.info("installing initrd image as '%s'" % self._initrd_dst_img_val)
		img_name = os.path.basename(self._initrd_dst_img_val)
		if not self._cmd.copy(img, self._initrd_dst_img_val):
			return False
		self.update_grub2_cfg = True
		curr_cmdline = self._cmd.read_file("/proc/cmdline").rstrip()
		initrd_grubpath = "/"
		lc = len(curr_cmdline)
		if lc:
			path = re.sub(r"^\s*BOOT_IMAGE=\s*(?:\([^)]*\))?(\S*/).*$", "\\1", curr_cmdline)
			if len(path) < lc:
				initrd_grubpath = path
		self._initrd_val = os.path.join(initrd_grubpath, img_name)
		return True

	@command_custom("grub2_cfg_file")
	def _grub2_cfg_file(self, enabling, value, verify, ignore_missing):
		# nothing to verify
		if verify:
			return None
		if enabling and value is not None:
			self._grub2_cfg_file_names = [str(value)]

	@command_custom("initrd_dst_img")
	def _initrd_dst_img(self, enabling, value, verify, ignore_missing):
		# nothing to verify
		if verify:
			return None
		if enabling and value is not None:
			self._initrd_dst_img_val = str(value)
			if self._initrd_dst_img_val == "":
				return False
			if self._initrd_dst_img_val[0] != "/":
				self._initrd_dst_img_val = os.path.join(consts.BOOT_DIR, self._initrd_dst_img_val)

	@command_custom("initrd_remove_dir")
	def _initrd_remove_dir(self, enabling, value, verify, ignore_missing):
		# nothing to verify
		if verify:
			return None
		if enabling and value is not None:
			self._initrd_remove_dir = self._cmd.get_bool(value) == "1"

	@command_custom("initrd_add_img", per_device = False, priority = 10)
	def _initrd_add_img(self, enabling, value, verify, ignore_missing):
		# nothing to verify
		if verify:
			return None
		if enabling and value is not None:
			src_img = str(value)
			self._init_initrd_dst_img(src_img)
			if src_img == "":
				return False
			if not self._install_initrd(src_img):
				return False

	@command_custom("initrd_add_dir", per_device = False, priority = 10)
	def _initrd_add_dir(self, enabling, value, verify, ignore_missing):
		# nothing to verify
		if verify:
			return None
		if enabling and value is not None:
			src_dir = str(value)
			self._init_initrd_dst_img(src_dir)
			if src_dir == "":
				return False
			if not os.path.isdir(src_dir):
				log.error("error: cannot create initrd image, source directory '%s' doesn't exist" % src_dir)
				return False

			log.info("generating initrd image from directory '%s'" % src_dir)
			(fd, tmpfile) = tempfile.mkstemp(prefix = "tuned-bootloader-", suffix = ".tmp")
			log.debug("writing initrd image to temporary file '%s'" % tmpfile)
			os.close(fd)
			(rc, out) = self._cmd.execute("find . | cpio -co > %s" % tmpfile, cwd = src_dir, shell = True)
			log.debug("cpio log: %s" % out)
			if rc != 0:
				log.error("error generating initrd image")
				self._cmd.unlink(tmpfile, no_error = True)
				return False
			self._install_initrd(tmpfile)
			self._cmd.unlink(tmpfile)
			if self._initrd_remove_dir:
				log.info("removing directory '%s'" % src_dir)
				self._cmd.rmtree(src_dir)

	@command_custom("cmdline", per_device = False, priority = 10)
	def _cmdline(self, enabling, value, verify, ignore_missing):
		v = self._variables.expand(self._cmd.unquote(value))
		if verify:
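			# Verification passes only if every expected argument appears
			# verbatim in the current kernel command line; otherwise the
			# changed or missing arguments are logged below.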
			if self._rpm_ostree:
				rpm_ostree_kargs = self._rpm_ostree_kargs()[0]
				cmdline = self._dict_to_options(rpm_ostree_kargs)
			else:
				cmdline = self._cmd.read_file("/proc/cmdline")
			if len(cmdline) == 0:
				return None
			cmdline_set = set(cmdline.split())
			value_set = set(v.split())
			missing_set = value_set - cmdline_set
			if len(missing_set) == 0:
				log.info(consts.STR_VERIFY_PROFILE_VALUE_OK % ("cmdline", str(value_set)))
				return True
			else:
				cmdline_dict = {v.split("=", 1)[0]: v for v in cmdline_set}
				for m in missing_set:
					arg = m.split("=", 1)[0]
					if not arg in cmdline_dict:
						log.error(consts.STR_VERIFY_PROFILE_CMDLINE_FAIL_MISSING % (arg, m))
					else:
						log.error(consts.STR_VERIFY_PROFILE_CMDLINE_FAIL % (cmdline_dict[arg], m))
				present_set = value_set & cmdline_set
				log.info("expected arguments that are present in cmdline: %s"%(" ".join(present_set),))
				return False
		if enabling and value is not None:
			log.info("installing additional boot command line parameters to grub2")
			self.update_grub2_cfg = True
			self._cmdline_val = v

	@command_custom("skip_grub_config", per_device = False, priority = 10)
	def _skip_grub_config(self, enabling, value, verify, ignore_missing):
		if verify:
			return None
		if enabling and value is not None:
			if self._cmd.get_bool(value) == "1":
				log.info("skipping any modification of grub config")
				self._skip_grub_config_val = True

	def _instance_post_static(self, instance, enabling):
		if enabling and self._skip_grub_config_val:
			if len(self._initrd_val) > 0:
				log.warn("requested changes to initrd will not be applied!")
			if len(self._cmdline_val) > 0:
				log.warn("requested changes to cmdline will not be applied!")
			# ensure that the desired cmdline is always written to BOOT_CMDLINE_FILE (/etc/tuned/bootcmdline)
			self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : self._cmdline_val, consts.BOOT_CMDLINE_INITRD_ADD_VAR : self._initrd_val})
		elif enabling and self.update_grub2_cfg:
			if self._rpm_ostree:
				self._rpm_ostree_update()
			else:
				self._grub2_update()
				self._bls_update()
			self.update_grub2_cfg = False
plugins/plugin_scsi_host.py
import errno
from . import hotplug
from .decorators import *
import tuned.logs
import tuned.consts as consts
from tuned.utils.commands import commands
import os
import re

log = tuned.logs.get()

class SCSIHostPlugin(hotplug.Plugin):
	"""
	`scsi_host`::
	
	Tunes options for SCSI hosts.
	+
	The plug-in sets Aggressive Link Power Management (ALPM) to the value specified
	by the [option]`alpm` option. The option takes one of three values:
	`min_power`, `medium_power` and `max_performance`.
	+
	NOTE: ALPM is only available on SATA controllers that use the Advanced
	Host Controller Interface (AHCI).
	+
	.ALPM setting when extended periods of idle time are expected
	====
	----
	[scsi_host]
	alpm=min_power
	----
	====
	"""

	def __init__(self, *args, **kwargs):
		super(SCSIHostPlugin, self).__init__(*args, **kwargs)

		self._cmd = commands()

	def _init_devices(self):
		super(SCSIHostPlugin, self)._init_devices()
		self._devices_supported = True
		self._free_devices = set()
		for device in self._hardware_inventory.get_devices("scsi"):
			if self._device_is_supported(device):
				self._free_devices.add(device.sys_name)

		self._assigned_devices = set()

	def _get_device_objects(self, devices):
		return [self._hardware_inventory.get_device("scsi", x) for x in devices]

	@classmethod
	def _device_is_supported(cls, device):
		return  device.device_type == "scsi_host"

	def _hardware_events_init(self):
		self._hardware_inventory.subscribe(self, "scsi", self._hardware_events_callback)

	def _hardware_events_cleanup(self):
		self._hardware_inventory.unsubscribe(self)

	def _hardware_events_callback(self, event, device):
		if self._device_is_supported(device):
			super(SCSIHostPlugin, self)._hardware_events_callback(event, device)

	def _added_device_apply_tuning(self, instance, device_name):
		super(SCSIHostPlugin, self)._added_device_apply_tuning(instance, device_name)

	def _removed_device_unapply_tuning(self, instance, device_name):
		super(SCSIHostPlugin, self)._removed_device_unapply_tuning(instance, device_name)

	@classmethod
	def _get_config_options(cls):
		return {
			"alpm"               : None,
		}

	def _instance_init(self, instance):
		instance._has_static_tuning = True
		instance._has_dynamic_tuning = False

	def _instance_cleanup(self, instance):
		pass

	def _get_alpm_policy_file(self, device):
		return os.path.join("/sys/class/scsi_host/", str(device), "link_power_management_policy")

	@command_set("alpm", per_device = True)
	def _set_alpm(self, policy, device, sim, remove):
		if policy is None:
			return None
		policy_file = self._get_alpm_policy_file(device)
		if not sim:
			if os.path.exists(policy_file):
				self._cmd.write_to_file(policy_file, policy, \
					no_error = [errno.ENOENT] if remove else False)
			else:
				log.info("ALPM control file ('%s') not found, skipping ALPM setting for '%s'" % (policy_file, str(device)))
				return None
		return policy

	@command_get("alpm")
	def _get_alpm(self, device, ignore_missing=False):
		policy_file = self._get_alpm_policy_file(device)
		policy = self._cmd.read_file(policy_file, no_error = True).strip()
		return policy if policy != "" else None
plugins/plugin_rtentsk.py
from . import base
from .decorators import *
import tuned.logs
from tuned.utils.commands import commands
import glob
import socket
import time

log = tuned.logs.get()

class RTENTSKPlugin(base.Plugin):
	"""
	`rtentsk`::
	
	Plugin for avoiding interruptions caused by static key IPIs that
	occur when a socket with timestamping enabled is opened. By opening
	such a socket itself, the plug-in keeps the static key enabled.
	"""

	def _instance_init(self, instance):
		instance._has_static_tuning = True
		instance._has_dynamic_tuning = False

		# Neither SO_TIMESTAMP nor SOF_TIMESTAMPING_OPT_TX_SWHW is
		# defined by the socket class

		SO_TIMESTAMP = 29 # see include/uapi/asm-generic/socket.h
		#define SO_TIMESTAMP 0x4012 # parisc!
		SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14) # see include/uapi/linux/net_tstamp.h

		s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
		s.setsockopt(socket.SOL_SOCKET, SO_TIMESTAMP, SOF_TIMESTAMPING_OPT_TX_SWHW)
		self.rtentsk_socket = s
		log.info("opened SOF_TIMESTAMPING_OPT_TX_SWHW socket")

	def _instance_cleanup(self, instance):
		s = self.rtentsk_socket
		s.close()
plugins/plugin_video.py
from . import base
from .decorators import *
import tuned.logs
from tuned.utils.commands import commands
import os
import errno
import re

log = tuned.logs.get()

class VideoPlugin(base.Plugin):
	"""
	`video`::
	
	Sets various powersave levels on video cards. Currently, only
	Radeon cards are supported. The powersave level can be specified
	by using the [option]`radeon_powersave` option. Supported values are:
	+
	--
	* `default`
	* `auto`
	* `low`
	* `mid`
	* `high`
	* `dynpm`
	* `dpm-battery`
	* `dpm-balanced`
	* `dpm-performance`
	--
	+
	For additional detail, see
	link:https://www.x.org/wiki/RadeonFeature/#kmspowermanagementoptions[KMS Power Management Options].
	+
	NOTE: This plug-in is experimental and the option might change in future releases.
	+
	.To set the powersave level for the Radeon video card to high
	====
	----
	[video]
	radeon_powersave=high
	----
	====
	"""

	def _init_devices(self):
		self._devices_supported = True
		self._free_devices = set()
		self._assigned_devices = set()

		# FIXME: this is a blind shot, needs testing
		for device in self._hardware_inventory.get_devices("drm").match_sys_name("card*").match_property("DEVTYPE", "drm_minor"):
			self._free_devices.add(device.sys_name)

		self._cmd = commands()

	def _get_device_objects(self, devices):
		return [self._hardware_inventory.get_device("drm", x) for x in devices]

	@classmethod
	def _get_config_options(self):
		return {
			"radeon_powersave" : None,
		}

	def _instance_init(self, instance):
		instance._has_dynamic_tuning = False
		instance._has_static_tuning = True

	def _instance_cleanup(self, instance):
		pass

	def _radeon_powersave_files(self, device):
		return {
			"method" : "/sys/class/drm/%s/device/power_method" % device,
			"profile": "/sys/class/drm/%s/device/power_profile" % device,
			"dpm_state": "/sys/class/drm/%s/device/power_dpm_state" % device
		}

	@command_set("radeon_powersave", per_device=True)
	def _set_radeon_powersave(self, value, device, sim, remove):
		sys_files = self._radeon_powersave_files(device)
		va = str(re.sub(r"(\s*:\s*)|(\s+)|(\s*;\s*)|(\s*,\s*)", " ", value)).split()
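		# The option may list several candidate values separated by ':',
		# ';', ',' or whitespace; the first one that is written
		# successfully is returned.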
		if not os.path.exists(sys_files["method"]):
			if not sim:
				log.warn("radeon_powersave is not supported on '%s'" % device)
				return None
		for v in va:
			if v in ["default", "auto", "low", "mid", "high"]:
				if not sim:
					if (self._cmd.write_to_file(sys_files["method"], "profile", \
						no_error = [errno.ENOENT] if remove else False) and
						self._cmd.write_to_file(sys_files["profile"], v, \
							no_error = [errno.ENOENT] if remove else False)):
								return v
			elif v == "dynpm":
				if not sim:
					if (self._cmd.write_to_file(sys_files["method"], "dynpm", \
						no_error = [errno.ENOENT] if remove else False)):
							return "dynpm"
			# new DPM profiles, recommended to use if supported
			elif v in ["dpm-battery", "dpm-balanced", "dpm-performance"]:
				if not sim:
					state = v[len("dpm-"):]
					if (self._cmd.write_to_file(sys_files["method"], "dpm", \
						no_error = [errno.ENOENT] if remove else False) and
						self._cmd.write_to_file(sys_files["dpm_state"], state, \
							no_error = [errno.ENOENT] if remove else False)):
								return v
			else:
				if not sim:
					log.warn("Invalid option for radeon_powersave.")
				return None
		return None

	@command_get("radeon_powersave")
	def _get_radeon_powersave(self, device, ignore_missing = False):
		sys_files = self._radeon_powersave_files(device)
		method = self._cmd.read_file(sys_files["method"], no_error=ignore_missing).strip()
		if method == "profile":
			return self._cmd.read_file(sys_files["profile"]).strip()
		elif method == "dynpm":
			return method
		elif method == "dpm":
			return "dpm-" + self._cmd.read_file(sys_files["dpm_state"]).strip()
		else:
			return None
plugins/plugin_scheduler.py
# code for cores isolation was inspired by Tuna implementation
# perf code was borrowed from kernel/tools/perf/python/twatch.py
# thanks to Arnaldo Carvalho de Melo <acme@redhat.com>

from . import base
from .decorators import *
import tuned.logs
import re
from subprocess import *
import threading
import perf
import select
import tuned.consts as consts
import procfs
from tuned.utils.commands import commands
import errno
import os
import collections
import math
# Check existence of scheduler API in os module
try:
	os.SCHED_FIFO
except AttributeError:
	import schedutils

log = tuned.logs.get()

class SchedulerParams(object):
	def __init__(self, cmd, cmdline = None, scheduler = None,
			priority = None, affinity = None, cgroup = None):
		self._cmd = cmd
		self.cmdline = cmdline
		self.scheduler = scheduler
		self.priority = priority
		self.affinity = affinity
		self.cgroup = cgroup

	@property
	def affinity(self):
		if self._affinity is None:
			return None
		else:
			return self._cmd.bitmask2cpulist(self._affinity)

	@affinity.setter
	def affinity(self, value):
		if value is None:
			self._affinity = None
		else:
			self._affinity = self._cmd.cpulist2bitmask(value)

class IRQAffinities(object):
	def __init__(self):
		self.irqs = {}
		self.default = None
		# IRQs that don't support changing CPU affinity:
		self.unchangeable = []

class SchedulerUtils(object):
	"""
	Class encapsulating scheduler implementation in os module
	"""

	_dict_schedcfg2schedconst = {
		"f": "SCHED_FIFO",
		"b": "SCHED_BATCH",
		"r": "SCHED_RR",
		"o": "SCHED_OTHER",
		"i": "SCHED_IDLE",
	}

	def __init__(self):
		# {"f": os.SCHED_FIFO...}
		self._dict_schedcfg2num = dict((k, getattr(os, name)) for k, name in self._dict_schedcfg2schedconst.items())
		# { os.SCHED_FIFO: "SCHED_FIFO"... }
		self._dict_num2schedconst = dict((getattr(os, name), name) for name in self._dict_schedcfg2schedconst.values())

	def sched_cfg_to_num(self, str_scheduler):
		return self._dict_schedcfg2num.get(str_scheduler)

	# Reimplementation of schedstr from schedutils for logging purposes
	def sched_num_to_const(self, scheduler):
		return self._dict_num2schedconst.get(scheduler)

	def get_scheduler(self, pid):
		return os.sched_getscheduler(pid)

	def set_scheduler(self, pid, sched, prio):
		os.sched_setscheduler(pid, sched, os.sched_param(prio))

	def get_affinity(self, pid):
		return os.sched_getaffinity(pid)

	def set_affinity(self, pid, affinity):
		os.sched_setaffinity(pid, affinity)

	def get_priority(self, pid):
		return os.sched_getparam(pid).sched_priority

	def get_priority_min(self, sched):
		return os.sched_get_priority_min(sched)

	def get_priority_max(self, sched):
		return os.sched_get_priority_max(sched)

class SchedulerUtilsSchedutils(SchedulerUtils):
	"""
	Class encapsulating scheduler implementation in schedutils module
	"""
	def __init__(self):
		# { "f": schedutils.SCHED_FIFO... }
		self._dict_schedcfg2num = dict((k, getattr(schedutils, name)) for k, name in self._dict_schedcfg2schedconst.items())
		# { schedutils.SCHED_FIFO: "SCHED_FIFO"... }
		self._dict_num2schedconst = dict((getattr(schedutils, name), name) for name in self._dict_schedcfg2schedconst.values())

	def get_scheduler(self, pid):
		return schedutils.get_scheduler(pid)

	def set_scheduler(self, pid, sched, prio):
		schedutils.set_scheduler(pid, sched, prio)

	def get_affinity(self, pid):
		return schedutils.get_affinity(pid)

	def set_affinity(self, pid, affinity):
		schedutils.set_affinity(pid, affinity)

	def get_priority(self, pid):
		return schedutils.get_priority(pid)

	def get_priority_min(self, sched):
		return schedutils.get_priority_min(sched)

	def get_priority_max(self, sched):
		return schedutils.get_priority_max(sched)

class SchedulerPlugin(base.Plugin):
	r"""
	`scheduler`::
	
	Allows tuning of scheduling priorities, process/thread/IRQ
	affinities, and CPU isolation.
	+
	To prevent processes/threads/IRQs from using certain CPUs, use
	the [option]`isolated_cores` option. It changes process/thread
	affinities and IRQ affinities, and it sets `default_smp_affinity`
	for IRQs. The CPU affinity mask is adjusted for all processes and
	threads matching [option]`ps_whitelist` option subject to success
	of the `sched_setaffinity()` system call. The default setting of
	the [option]`ps_whitelist` regular expression is `.*` to match all
	processes and thread names. To exclude certain processes and threads
	use [option]`ps_blacklist` option. The value of this option is also
	interpreted as a regular expression and process/thread names (`ps -eo
	cmd`) are matched against that expression. Profile rollback allows
	all matching processes and threads to run on all CPUs and restores
	the IRQ settings prior to the profile application.
	+
	Multiple regular expressions for [option]`ps_whitelist`
	and [option]`ps_blacklist` options are allowed and separated by
	`;`. Quoted semicolon `\;` is taken literally.
	+
	.Isolate CPUs 2-4
	====
	----
	[scheduler]
	isolated_cores=2-4
	ps_blacklist=.*pmd.*;.*PMD.*;^DPDK;.*qemu-kvm.*
	----
	Isolate CPUs 2-4 while ignoring processes and threads matching
	`ps_blacklist` regular expressions.
	====
	The [option]`irq_process` option controls whether the scheduler plugin
	applies the `isolated_cores` parameter to IRQ affinities. The default
	value is `true`, which means that the scheduler plugin will move all
	possible IRQs away from the isolated cores. When `irq_process` is set
	to `false`, the plugin will not change any IRQ affinities.
	+
	The [option]`default_irq_smp_affinity` option controls the values
	*TuneD* writes to `/proc/irq/default_smp_affinity`. The file specifies
	default affinity mask that applies to all non-active IRQs. Once an
	IRQ is allocated/activated its affinity bitmask will be set to the
	default mask.
	+
	The following values are supported:
	+
	--
	`calc`::
	Content of `/proc/irq/default_smp_affinity` will be calculated
	from the `isolated_cores` parameter. Non-isolated cores
	are calculated as an inversion of the `isolated_cores`. Then
	the intersection of the non-isolated cores and the previous
	content of `/proc/irq/default_smp_affinity` is written to
	`/proc/irq/default_smp_affinity`. If the intersection is
	an empty set, then just the non-isolated cores are written to
	`/proc/irq/default_smp_affinity`. This behavior is the default if
	the parameter `default_irq_smp_affinity` is omitted.
	`ignore`::
	*TuneD* will not touch `/proc/irq/default_smp_affinity`.
	explicit cpulist::
	The cpulist (such as 1,3-4) is unpacked and written directly to
	`/proc/irq/default_smp_affinity`.
	--
	+
	.An explicit CPU list to set the default IRQ smp affinity to CPUs 0 and 2
	====
	----
	[scheduler]
	isolated_cores=1,3
	default_irq_smp_affinity=0,2
	----
	====
	To adjust scheduling policy, priority and affinity for a group of
	processes/threads, use the following syntax.
	+
	[subs="+quotes,+macros"]
	----
	group.__groupname__=__rule_prio__:__sched__:__prio__:__affinity__:__regex__
	----
	+
	where `__rule_prio__` defines the internal *TuneD* priority of the
	rule. Rules are sorted based on priority. This is needed for
	inheritance to be able to reorder previously defined rules. Rules
	with equal `__rule_prio__` should be processed in the order they
	were defined; however, this is Python interpreter dependent. To
	disable an inherited rule for `__groupname__` use:
	+
	[subs="+quotes,+macros"]
	----
	group.__groupname__=
	----
	+
	`__sched__` must be one of:
	*`f`* for FIFO,
	*`b`* for batch,
	*`r`* for round robin,
	*`o`* for other,
	*`*`* do not change.
	+
	`__prio__` is the scheduling priority (see `chrt -m`).
	+
	`__affinity__` is the CPU affinity in hexadecimal. Use `*` for no change.
	+
	`__regex__` is a Python regular expression. It is matched against the output of
	+
	[subs="+quotes,+macros"]
	----
	ps -eo cmd
	----
	+
	Any given process name may match more than one group. In such a case,
	the priority and scheduling policy are taken from the last matching
	`__regex__`.
	+
	.Setting scheduling policy and priorities to kernel threads and watchdog
	====
	----
	[scheduler]
	group.kthreads=0:*:1:*:\[.*\]$
	group.watchdog=0:f:99:*:\[watchdog.*\]
	----
	====
	+
	The scheduler plug-in uses perf event loop to catch newly created
	processes. By default it listens to `perf.RECORD_COMM` and
	`perf.RECORD_EXIT` events. By setting [option]`perf_process_fork`
	option to `true`, `perf.RECORD_FORK` events will also be listened
	to. In other words, child processes created by the `fork()` system
	call will be processed. Since child processes inherit CPU affinity
	from their parents, the scheduler plug-in usually does not need to
	explicitly process these events. As processing perf events can
	pose a significant CPU overhead, the [option]`perf_process_fork`
	option parameter is set to `false` by default. Due to this, child
	processes are not processed by the scheduler plug-in.
	+
	The CPU overhead of the scheduler plugin can be mitigated by using
	the scheduler [option]`runtime` option and setting it to `0`. This
	will completely disable the dynamic scheduler functionality and the
	perf events will not be monitored and acted upon. The disadvantage
	of this approach is that the process/thread tuning will be done only
	at profile application.
	+
	.Disabling the scheduler dynamic functionality
	====
	----
	[scheduler]
	runtime=0
	isolated_cores=1,3
	----
	====
	+
	NOTE: For perf events, a memory mapped buffer is used. Under heavy load
	the buffer may overflow. In such cases the `scheduler` plug-in
	may start missing events and failing to process some newly created
	processes. Increasing the buffer size may help. The buffer size can
	be set with the [option]`perf_mmap_pages` option. The value of this
	parameter has to be a power of 2. If it is not, the nearest higher
	power of 2 is calculated and used instead. If the
	[option]`perf_mmap_pages` option is omitted, the default kernel
	value is used.
	+
	The scheduler plug-in supports process/thread confinement using
	cgroups v1.
	+
	[option]`cgroup_mount_point` option specifies the path to mount the
	cgroup filesystem or where *TuneD* expects it to be mounted. If unset,
	`/sys/fs/cgroup/cpuset` is expected.
	+
	If [option]`cgroup_groups_init` option is set to `1` *TuneD*
	will create (and remove) all cgroups defined with the `cgroup*`
	options. This is the default behavior. If it is set to `0` the
	cgroups need to be preset by other means.
	+
	If [option]`cgroup_mount_point_init` option is set to `1`,
	*TuneD* will create (and remove) the cgroup mountpoint. It implies
	`cgroup_groups_init = 1`. If set to `0` the cgroups mount point
	needs to be preset by other means. This is the default behavior.
	+
	The [option]`cgroup_for_isolated_cores` option is the cgroup
	name used for the [option]`isolated_cores` option functionality. For
	example, if a system has 4 CPUs, `isolated_cores=1` means that all
	processes/threads will be moved to CPUs 0,2-3.
	The scheduler plug-in will isolate the specified core by writing
	the calculated CPU affinity to the `cpuset.cpus` control file of
	the specified cgroup and move all the matching processes/threads to
	this group. If this option is unset, classic cpuset affinity using
	`sched_setaffinity()` will be used.
	+
	[option]`cgroup.__cgroup_name__` option defines affinities for
	arbitrary cgroups. Even hierarchical cgroups can be used, but the
	hierarchy needs to be specified in the correct order. Also *TuneD*
	does not do any sanity checks here, with the exception that it forces
	the cgroup to be under [option]`cgroup_mount_point`.
	+
	The syntax of the scheduler option starting with `group.` has been
	augmented to use `cgroup.__cgroup_name__` instead of the hexadecimal
	`__affinity__`. The matching processes will be moved to the cgroup
	`__cgroup_name__`. It is also possible to use cgroups which have
	not been defined by the [option]`cgroup.` option as described above,
	i.e. cgroups not managed by *TuneD*.
	+
	All cgroup names are sanitized by replacing all dots (`.`) with
	slashes (`/`). This is to prevent the plug-in from writing outside
	[option]`cgroup_mount_point`.
	+
	.Using cgroups v1 with the scheduler plug-in
	====
	----
	[scheduler]
	cgroup_mount_point=/sys/fs/cgroup/cpuset
	cgroup_mount_point_init=1
	cgroup_groups_init=1
	cgroup_for_isolated_cores=group
	cgroup.group1=2
	cgroup.group2=0,2
	
	group.ksoftirqd=0:f:2:cgroup.group1:ksoftirqd.*
	ps_blacklist=ksoftirqd.*;rcuc.*;rcub.*;ktimersoftd.*
	isolated_cores=1
	----
	Cgroup `group1` has the affinity set to CPU 2 and the cgroup `group2`
	to CPUs 0,2. Given a 4 CPU setup, the [option]`isolated_cores=1`
	option causes all processes/threads to be moved to CPU
	cores 0,2-3. Processes/threads that are blacklisted by the
	[option]`ps_blacklist` regular expression will not be moved.
	
	The scheduler plug-in will isolate the specified core by writing the
	CPU affinity 0,2-3 to the `cpuset.cpus` control file of the `group`
	and move all the matching processes/threads to this cgroup.
	====
	Option [option]`cgroup_ps_blacklist` allows excluding processes
	which belong to the blacklisted cgroups. The regular expression specified
	by this option is matched against cgroup hierarchies from
	`/proc/PID/cgroups`. Cgroups v1 hierarchies from `/proc/PID/cgroups`
	are separated by commas ',' prior to regular expression matching. The
	following is an example of content against which the regular expression
	is matched against: `10:hugetlb:/,9:perf_event:/,8:blkio:/`
	+
	Multiple regular expressions can be separated by semicolon ';'. The
	semicolon represents a logical 'or' operator.
	+
	.Cgroup-based exclusion of processes from the scheduler
	====
	----
	[scheduler]
	isolated_cores=1
	cgroup_ps_blacklist=:/daemons\b
	----
	
	The scheduler plug-in will move all processes away from core 1 except processes which
	belong to cgroup '/daemons'. The '\b' is a regular expression
	metacharacter that matches a word boundary.
	
	----
	[scheduler]
	isolated_cores=1
	cgroup_ps_blacklist=\b8:blkio:
	----
	
	The scheduler plug-in will exclude all processes which belong to a cgroup
	with hierarchy-ID 8 and controller-list blkio.
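	
	Multiple expressions can also be combined with ';'. For example,
	with illustrative cgroup names:
	
	----
	[scheduler]
	isolated_cores=1
	cgroup_ps_blacklist=:/daemons\b;:/machine\.slice\b
	----
	
	The scheduler plug-in will exclude processes which belong to either
	the '/daemons' or the '/machine.slice' cgroup.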
	====
	Recent kernels moved some `sched_` and `numa_balancing_` kernel run-time
	parameters from `/proc/sys/kernel`, managed by the `sysctl` utility, to
	`debugfs`, typically mounted under `/sys/kernel/debug`. *TuneD* provides an
	abstraction mechanism for the following parameters via the scheduler plug-in:
	[option]`sched_min_granularity_ns`, [option]`sched_latency_ns`,
	[option]`sched_wakeup_granularity_ns`, [option]`sched_tunable_scaling`,
	[option]`sched_migration_cost_ns`, [option]`sched_nr_migrate`,
	[option]`numa_balancing_scan_delay_ms`,
	[option]`numa_balancing_scan_period_min_ms`,
	[option]`numa_balancing_scan_period_max_ms` and
	[option]`numa_balancing_scan_size_mb`.
	Based on the kernel used, *TuneD* will write the specified value to the correct
	location.
	+
	.Set tasks' "cache hot" value for migration decisions.
	====
	----
	[scheduler]
	sched_migration_cost_ns=500000
	----
	On older kernels, this is equivalent to:
	----
	[sysctl]
	kernel.sched_migration_cost_ns=500000
	----
	that is, value `500000` will be written to `/proc/sys/kernel/sched_migration_cost_ns`.
	However, on more recent kernels, the value `500000` will be written to
	`/sys/kernel/debug/sched/migration_cost_ns`.
	====
	"""

	def __init__(self, monitor_repository, storage_factory, hardware_inventory, device_matcher, device_matcher_udev, plugin_instance_factory, global_cfg, variables):
		super(SchedulerPlugin, self).__init__(monitor_repository, storage_factory, hardware_inventory, device_matcher, device_matcher_udev, plugin_instance_factory, global_cfg, variables)
		self._has_dynamic_options = True
		self._daemon = consts.CFG_DEF_DAEMON
		self._sleep_interval = int(consts.CFG_DEF_SLEEP_INTERVAL)
		if global_cfg is not None:
			self._daemon = global_cfg.get_bool(consts.CFG_DAEMON, consts.CFG_DEF_DAEMON)
			self._sleep_interval = int(global_cfg.get(consts.CFG_SLEEP_INTERVAL, consts.CFG_DEF_SLEEP_INTERVAL))
		self._cmd = commands()
		# helper variable used to show the hint that the error may be caused by Secure Boot only once
		self._secure_boot_hint = None
		# paths cache for sched_ and numa_ tunings
		self._sched_knob_paths_cache = {}
		# default is to whitelist all and blacklist none
		self._ps_whitelist = ".*"
		self._ps_blacklist = ""
		self._cgroup_ps_blacklist_re = ""
		self._cpus = perf.cpu_map()
		self._scheduler_storage_key = self._storage_key(
				command_name = "scheduler")
		self._irq_process = True
		self._irq_storage_key = self._storage_key(
				command_name = "irq")
		self._evlist = None
		try:
			self._scheduler_utils = SchedulerUtils()
		except AttributeError:
			self._scheduler_utils = SchedulerUtilsSchedutils()

	def _calc_mmap_pages(self, mmap_pages):
		if mmap_pages is None:
			return None
		try:
			mp = int(mmap_pages)
		except ValueError:
			return 0
		if mp <= 0:
			return 0
		# round up to the nearest power of two value
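		# e.g. perf_mmap_pages=100 is rounded up to 128 (2^7)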
		return int(2 ** math.ceil(math.log(mp, 2)))

	def _instance_init(self, instance):
		instance._evlist = None
		instance._has_dynamic_tuning = False
		instance._has_static_tuning = True
		# this is a hack; runtime_tuning should be covered by dynamic_tuning configuration
		# TODO: add per plugin dynamic tuning configuration and use dynamic_tuning configuration
		# instead of runtime_tuning
		instance._runtime_tuning = True

		# FIXME: do we want to do this here?
		# recover original values in case of crash
		self._scheduler_original = self._storage.get(
				self._scheduler_storage_key, {})
		if len(self._scheduler_original) > 0:
			log.info("recovering scheduling settings from previous run")
			self._restore_ps_affinity()
			self._scheduler_original = {}
			self._storage.unset(self._scheduler_storage_key)

		self._cgroups_original_affinity = dict()

		# calculated by isolated_cores setter
		self._affinity = None

		self._cgroup_affinity_initialized = False
		self._cgroup = None
		self._cgroups = collections.OrderedDict([(self._sanitize_cgroup_path(option[7:]), self._variables.expand(affinity))
			for option, affinity in instance.options.items() if option[:7] == "cgroup." and len(option) > 7])

		instance._scheduler = instance.options

		perf_mmap_pages_raw = self._variables.expand(instance.options["perf_mmap_pages"])
		perf_mmap_pages = self._calc_mmap_pages(perf_mmap_pages_raw)
		if perf_mmap_pages == 0:
			log.error("Invalid 'perf_mmap_pages' value specified: '%s', using default kernel value" % perf_mmap_pages_raw)
			perf_mmap_pages = None
		if perf_mmap_pages is not None and str(perf_mmap_pages) != perf_mmap_pages_raw:
			log.info("'perf_mmap_pages' value has to be power of two, specified: '%s', using: '%d'" %
				(perf_mmap_pages_raw, perf_mmap_pages))
		for k in instance._scheduler:
			instance._scheduler[k] = self._variables.expand(instance._scheduler[k])
		if self._cmd.get_bool(instance._scheduler.get("runtime", 1)) == "0":
			instance._runtime_tuning = False
		instance._terminate = threading.Event()
		if self._daemon and instance._runtime_tuning:
			try:
				instance._threads = perf.thread_map()
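				# the dummy software event does not count anything; it only
				# delivers comm/fork/exit notifications about tasks to the
				# tuning thread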
				evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
					config = perf.COUNT_SW_DUMMY,
					task = 1, comm = 1, mmap = 0, freq = 0,
					wakeup_events = 1, watermark = 1,
					sample_type = perf.SAMPLE_TID | perf.SAMPLE_CPU)
				evsel.open(cpus = self._cpus, threads = instance._threads)
				instance._evlist = perf.evlist(self._cpus, instance._threads)
				instance._evlist.add(evsel)
				if perf_mmap_pages is None:
					instance._evlist.mmap()
				else:
					instance._evlist.mmap(pages = perf_mmap_pages)
			# no perf
			except:
				instance._runtime_tuning = False

	def _instance_cleanup(self, instance):
		if instance._evlist:
			for fd in instance._evlist.get_pollfd():
				os.close(fd.name)

	@classmethod
	def _get_config_options(cls):
		return {
			"isolated_cores": None,
			"cgroup_mount_point": consts.DEF_CGROUP_MOUNT_POINT,
			"cgroup_mount_point_init": False,
			"cgroup_groups_init": True,
			"cgroup_for_isolated_cores": None,
			"cgroup_ps_blacklist": None,
			"ps_whitelist": None,
			"ps_blacklist": None,
			"irq_process": True,
			"default_irq_smp_affinity": "calc",
			"perf_mmap_pages": None,
			"perf_process_fork": "false",
			"sched_min_granularity_ns": None,
			"sched_latency_ns": None,
			"sched_wakeup_granularity_ns": None,
			"sched_tunable_scaling": None,
			"sched_migration_cost_ns": None,
			"sched_nr_migrate": None,
			"numa_balancing_scan_delay_ms": None,
			"numa_balancing_scan_period_min_ms": None,
			"numa_balancing_scan_period_max_ms": None,
			"numa_balancing_scan_size_mb": None
		}

	def _sanitize_cgroup_path(self, value):
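		# e.g. "my.group" becomes "my/group", keeping the resulting path
		# under cgroup_mount_point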
		return str(value).replace(".", "/") if value is not None else None

	# Raises OSError, IOError
	def _get_cmdline(self, process):
		if not isinstance(process, procfs.process):
			pid = process
			process = procfs.process(pid)
		cmdline = procfs.process_cmdline(process)
		if self._is_kthread(process):
			cmdline = "[" + cmdline + "]"
		return cmdline

	# Raises OSError, IOError
	def get_processes(self):
		ps = procfs.pidstats()
		ps.reload_threads()
		processes = {}
		for proc in ps.values():
			try:
				cmd = self._get_cmdline(proc)
				pid = proc["pid"]
				processes[pid] = cmd
				if "threads" in proc:
					for pid in proc["threads"].keys():
						cmd = self._get_cmdline(proc)
						processes[pid] = cmd
			except (OSError, IOError) as e:
				if e.errno == errno.ENOENT \
						or e.errno == errno.ESRCH:
					continue
				else:
					raise
		return processes

	# Raises OSError
	# Raises SystemError with old (pre-0.4) python-schedutils
	# instead of OSError
	# If PID doesn't exist, errno == ESRCH
	def _get_rt(self, pid):
		scheduler = self._scheduler_utils.get_scheduler(pid)
		sched_str = self._scheduler_utils.sched_num_to_const(scheduler)
		priority = self._scheduler_utils.get_priority(pid)
		log.debug("Read scheduler policy '%s' and priority '%d' of PID '%d'"
				% (sched_str, priority, pid))
		return (scheduler, priority)

	def _set_rt(self, pid, sched, prio):
		sched_str = self._scheduler_utils.sched_num_to_const(sched)
		log.debug("Setting scheduler policy to '%s' and priority to '%d' of PID '%d'."
				% (sched_str, prio, pid))
		try:
			prio_min = self._scheduler_utils.get_priority_min(sched)
			prio_max = self._scheduler_utils.get_priority_max(sched)
			if prio < prio_min or prio > prio_max:
				log.error("Priority for %s must be in range %d - %d. '%d' was given."
						% (sched_str, prio_min,
						prio_max, prio))
		# Workaround for old (pre-0.4) python-schedutils which raised
		# SystemError instead of OSError
		except (SystemError, OSError) as e:
			log.error("Failed to get allowed priority range: %s"
					% e)
		try:
			self._scheduler_utils.set_scheduler(pid, sched, prio)
		except (SystemError, OSError) as e:
			if hasattr(e, "errno") and e.errno == errno.ESRCH:
				log.debug("Failed to set scheduling parameters of PID %d, the task vanished."
						% pid)
			else:
				log.error("Failed to set scheduling parameters of PID %d: %s"
						% (pid, e))

	# process is a procfs.process object
	# Raises OSError, IOError
	def _is_kthread(self, process):
		return process["stat"]["flags"] & procfs.pidstat.PF_KTHREAD != 0

	# Return codes:
	# 0 - Affinity is fixed
	# 1 - Affinity is changeable
	# -1 - Task vanished
	# -2 - Error
	def _affinity_changeable(self, pid):
		try:
			process = procfs.process(pid)
			if process["stat"].is_bound_to_cpu():
				if process["stat"]["state"] == "Z":
					log.debug("Affinity of zombie task with PID %d cannot be changed, the task's affinity mask is fixed."
							% pid)
				elif self._is_kthread(process):
					log.debug("Affinity of kernel thread with PID %d cannot be changed, the task's affinity mask is fixed."
							% pid)
				else:
					log.warn("Affinity of task with PID %d cannot be changed, the task's affinity mask is fixed."
							% pid)
				return 0
			else:
				return 1
		except (OSError, IOError) as e:
			if e.errno == errno.ENOENT or e.errno == errno.ESRCH:
				log.debug("Failed to get task info for PID %d, the task vanished."
						% pid)
				return -1
			else:
				log.error("Failed to get task info for PID %d: %s"
						% (pid, e))
				return -2
		except (AttributeError, KeyError) as e:
			log.error("Failed to get task info for PID %d: %s"
					% (pid, e))
			return -2

	def _store_orig_process_rt(self, pid, scheduler, priority):
		try:
			params = self._scheduler_original[pid]
		except KeyError:
			params = SchedulerParams(self._cmd)
			self._scheduler_original[pid] = params
		if params.scheduler is None and params.priority is None:
			params.scheduler = scheduler
			params.priority = priority

	def _tune_process_rt(self, pid, sched, prio):
		cont = True
		if sched is None and prio is None:
			return cont
		try:
			(prev_sched, prev_prio) = self._get_rt(pid)
			if sched is None:
				sched = prev_sched
			self._set_rt(pid, sched, prio)
			self._store_orig_process_rt(pid, prev_sched, prev_prio)
		except (SystemError, OSError) as e:
			if hasattr(e, "errno") and e.errno == errno.ESRCH:
				log.debug("Failed to read scheduler policy of PID %d, the task vanished."
						% pid)
				if pid in self._scheduler_original:
					del self._scheduler_original[pid]
				cont = False
			else:
				log.error("Refusing to set scheduler and priority of PID %d, reading original scheduling parameters failed: %s"
						% (pid, e))
		return cont

	def _is_cgroup_affinity(self, affinity):
		return str(affinity)[:7] == "cgroup."

	def _store_orig_process_affinity(self, pid, affinity, is_cgroup = False):
		try:
			params = self._scheduler_original[pid]
		except KeyError:
			params = SchedulerParams(self._cmd)
			self._scheduler_original[pid] = params
		if params.affinity is None and params.cgroup is None:
			if is_cgroup:
				params.cgroup = affinity
			else:
				params.affinity = affinity

	def _get_cgroup_affinity(self, pid):
		# we cannot use procfs, because it uses the comma ',' delimiter
		# which can be ambiguous
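		# e.g. the line '7:cpuset:/group1' yields 'group1'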
		for l in self._cmd.read_file("%s/%s/%s" % (consts.PROCFS_MOUNT_POINT, str(pid), "cgroup"), no_error = True).split("\n"):
			try:
				cgroup = l.split(":cpuset:")[1][1:]
				return cgroup if cgroup != "" else "/"
			except IndexError:
				pass
		return "/"

	# it can be arbitrary cgroup even cgroup we didn't set, but it needs to be
	# under "cgroup_mount_point"
	def _set_cgroup(self, pid, cgroup):
		cgroup = self._sanitize_cgroup_path(cgroup)
		path = self._cgroup_mount_point
		if cgroup != "/":
			path = "%s/%s" % (path, cgroup)
		self._cmd.write_to_file("%s/tasks" % path, str(pid), no_error = True)

	def _parse_cgroup_affinity(self, cgroup):
		# "cgroup.CGROUP"
		cgroup = cgroup[7:]
		# this should be faster than string comparison
		is_cgroup = not isinstance(cgroup, list) and len(cgroup) > 0
		return is_cgroup, cgroup

	def _tune_process_affinity(self, pid, affinity, intersect = False):
		cont = True
		if affinity is None:
			return cont
		try:
			(is_cgroup, cgroup) = self._parse_cgroup_affinity(affinity)
			if is_cgroup:
				prev_affinity = self._get_cgroup_affinity(pid)
				self._set_cgroup(pid, cgroup)
			else:
				prev_affinity = self._get_affinity(pid)
				if intersect:
					affinity = self._get_intersect_affinity(
							prev_affinity, affinity,
							affinity)
				self._set_affinity(pid, affinity)
			self._store_orig_process_affinity(pid,
					prev_affinity, is_cgroup)
		except (SystemError, OSError) as e:
			if hasattr(e, "errno") and e.errno == errno.ESRCH:
				log.debug("Failed to read affinity of PID %d, the task vanished."
						% pid)
				if pid in self._scheduler_original:
					del self._scheduler_original[pid]
				cont = False
			else:
				log.error("Refusing to set CPU affinity of PID %d, reading original affinity failed: %s"
						% (pid, e))
		return cont

	#tune process and store previous values
	def _tune_process(self, pid, cmd, sched, prio, affinity):
		cont = self._tune_process_rt(pid, sched, prio)
		if not cont:
			return
		cont = self._tune_process_affinity(pid, affinity)
		if not cont or pid not in self._scheduler_original:
			return
		self._scheduler_original[pid].cmdline = cmd

	def _convert_sched_params(self, str_scheduler, str_priority):
		scheduler = self._scheduler_utils.sched_cfg_to_num(str_scheduler)
		if scheduler is None and str_scheduler != "*":
			log.error("Invalid scheduler: %s. Scheduler and priority will be ignored."
					% str_scheduler)
			return (None, None)
		else:
			try:
				priority = int(str_priority)
			except ValueError:
				log.error("Invalid priority: %s. Scheduler and priority will be ignored."
							% str_priority)
				return (None, None)
		return (scheduler, priority)

	def _convert_affinity(self, str_affinity):
		if str_affinity == "*":
			affinity = None
		elif self._is_cgroup_affinity(str_affinity):
			affinity = str_affinity
		else:
			affinity = self._cmd.hex2cpulist(str_affinity)
			if not affinity:
				log.error("Invalid affinity: %s. It will be ignored."
						% str_affinity)
				affinity = None
		return affinity

	def _convert_sched_cfg(self, vals):
		(rule_prio, scheduler, priority, affinity, regex) = vals
		(scheduler, priority) = self._convert_sched_params(
				scheduler, priority)
		affinity = self._convert_affinity(affinity)
		return (rule_prio, scheduler, priority, affinity, regex)

	def _cgroup_create_group(self, cgroup):
		path = "%s/%s" % (self._cgroup_mount_point, cgroup)
		try:
			os.mkdir(path, consts.DEF_CGROUP_MODE)
		except OSError as e:
			log.error("Unable to create cgroup '%s': %s" % (path, e))
		if (not self._cmd.write_to_file("%s/%s" % (path, "cpuset.mems"),
				self._cmd.read_file("%s/%s" % (self._cgroup_mount_point, "cpuset.mems"), no_error = True),
				no_error = True)):
					log.error("Unable to initialize 'cpuset.mems ' for cgroup '%s'" % path)

	def _cgroup_initialize_groups(self):
		if self._cgroup is not None and not self._cgroup in self._cgroups:
			self._cgroup_create_group(self._cgroup)
		for cg in self._cgroups:
			self._cgroup_create_group(cg)

	def _cgroup_initialize(self):
		log.debug("Initializing cgroups settings")
		try:
			os.makedirs(self._cgroup_mount_point, consts.DEF_CGROUP_MODE)
		except OSError as e:
			log.error("Unable to create cgroup mount point: %s" % e)
		(ret, out) = self._cmd.execute(["mount", "-t", "cgroup", "-o", "cpuset", "cpuset", self._cgroup_mount_point])
		if ret != 0:
			log.error("Unable to mount '%s'" % self._cgroup_mount_point)

	def _remove_dir(self, cgroup):
		try:
			os.rmdir(cgroup)
		except OSError as e:
			log.error("Unable to remove directory '%s': %s" % (cgroup, e))

	def _cgroup_finalize_groups(self):
		for cg in reversed(self._cgroups):
			self._remove_dir("%s/%s" % (self._cgroup_mount_point, cg))
		if self._cgroup is not None and not self._cgroup in self._cgroups:
			self._remove_dir("%s/%s" % (self._cgroup_mount_point, self._cgroup))

	def _cgroup_finalize(self):
		log.debug("Removing cgroups settings")
		(ret, out) = self._cmd.execute(["umount", self._cgroup_mount_point])
		if ret != 0:
			log.error("Unable to umount '%s'" % self._cgroup_mount_point)
			return False
		self._remove_dir(self._cgroup_mount_point)
		d = os.path.dirname(self._cgroup_mount_point)
		if (d != "/"):
			self._remove_dir(d)

	def _cgroup_set_affinity_one(self, cgroup, affinity, backup = False):
		if affinity != "":
			log.debug("Setting cgroup '%s' affinity to '%s'" % (cgroup, affinity))
		else:
			log.debug("Skipping cgroup '%s', empty affinity requested" % cgroup)
			return
		path = "%s/%s/%s" % (self._cgroup_mount_point, cgroup, "cpuset.cpus")
		if backup:
			orig_affinity = self._cmd.read_file(path, err_ret = "ERR", no_error = True).strip()
			if orig_affinity != "ERR":
				self._cgroups_original_affinity[cgroup] = orig_affinity
			else:
				log.error("Refusing to set affinity of cgroup '%s', reading original affinity failed" % cgroup)
				return
		if not self._cmd.write_to_file(path, affinity, no_error = True):
			log.error("Unable to set affinity '%s' for cgroup '%s'" % (affinity, cgroup))

	def _cgroup_set_affinity(self):
		if self._cgroup_affinity_initialized:
			return
		log.debug("Setting cgroups affinities")
		if self._affinity is not None and self._cgroup is not None and not self._cgroup in self._cgroups:
			self._cgroup_set_affinity_one(self._cgroup, self._affinity, backup = True)
		for cg in self._cgroups.items():
			self._cgroup_set_affinity_one(cg[0], cg[1], backup = True)
		self._cgroup_affinity_initialized = True

	def _cgroup_restore_affinity(self):
		log.debug("Restoring cgroups affinities")
		for cg in self._cgroups_original_affinity.items():
			self._cgroup_set_affinity_one(cg[0], cg[1])

	def _instance_apply_static(self, instance):
		# need to get "cgroup_mount_point_init", "cgroup_mount_point", "cgroup_groups_init",
		# "cgroup", and initialize mount point and cgroups before super class implementation call
		self._cgroup_mount_point = self._variables.expand(instance.options["cgroup_mount_point"])
		self._cgroup_mount_point_init = self._cmd.get_bool(self._variables.expand(
			instance.options["cgroup_mount_point_init"])) == "1"
		self._cgroup_groups_init = self._cmd.get_bool(self._variables.expand(
			instance.options["cgroup_groups_init"])) == "1"
		self._cgroup = self._sanitize_cgroup_path(self._variables.expand(
			instance.options["cgroup_for_isolated_cores"]))

		if self._cgroup_mount_point_init:
			self._cgroup_initialize()
		if self._cgroup_groups_init or self._cgroup_mount_point_init:
			self._cgroup_initialize_groups()

		super(SchedulerPlugin, self)._instance_apply_static(instance)

		self._cgroup_set_affinity()
		try:
			ps = self.get_processes()
		except (OSError, IOError) as e:
			log.error("error applying tuning, cannot get information about running processes: %s"
					% e)
			return
		sched_cfg = [(option, str(value).split(":", 4)) for option, value in instance._scheduler.items()]
		buf = [(option, self._convert_sched_cfg(vals))
				for option, vals in sched_cfg
				if re.match(r"group\.", option)
				and len(vals) == 5]
		sched_cfg = sorted(buf, key=lambda option_vals: option_vals[1][0])
		sched_all = dict()
		# for runtime tuning
		instance._sched_lookup = {}
		for option, (rule_prio, scheduler, priority, affinity, regex) \
				in sched_cfg:
			try:
				r = re.compile(regex)
			except re.error as e:
				log.error("error compiling regular expression: '%s'" % str(regex))
				continue
			processes = [(pid, cmd) for pid, cmd in ps.items() if re.search(r, cmd) is not None]
			#cmd - process name, option - group name
			sched = dict([(pid, (cmd, option, scheduler, priority, affinity, regex))
					for pid, cmd in processes])
			sched_all.update(sched)
			# make any contained regexes non-capturing: replace "(" with "(?:",
			# unless the "(" is preceded by "\" or followed by "?"
			regex = re.sub(r"(?<!\\)\((?!\?)", "(?:", str(regex))
			instance._sched_lookup[regex] = [scheduler, priority, affinity]
		for pid, (cmd, option, scheduler, priority, affinity, regex) \
				in sched_all.items():
			self._tune_process(pid, cmd, scheduler,
					priority, affinity)
		self._storage.set(self._scheduler_storage_key,
				self._scheduler_original)
		if self._daemon and instance._runtime_tuning:
			instance._thread = threading.Thread(target = self._thread_code, args = [instance])
			instance._thread.start()

	def _restore_ps_affinity(self):
		try:
			ps = self.get_processes()
		except (OSError, IOError) as e:
			log.error("error unapplying tuning, cannot get information about running processes: %s"
					% e)
			return
		for pid, orig_params in self._scheduler_original.items():
			# if command line for the pid didn't change, it's very probably the same process
			if pid not in ps or ps[pid] != orig_params.cmdline:
				continue
			if orig_params.scheduler is not None \
					and orig_params.priority is not None:
				self._set_rt(pid, orig_params.scheduler,
						orig_params.priority)
			if orig_params.cgroup is not None:
				self._set_cgroup(pid, orig_params.cgroup)
			elif orig_params.affinity is not None:
				self._set_affinity(pid, orig_params.affinity)
		self._scheduler_original = {}
		self._storage.unset(self._scheduler_storage_key)

	def _cgroup_cleanup_tasks_one(self, cgroup):
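		# move any tasks left in the given cgroup back to the root cgroup
		# (the top-level 'tasks' file); this is retried several times
		# because new tasks may appear in the cgroup while it is drained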
		cnt = int(consts.CGROUP_CLEANUP_TASKS_RETRY)
		data = " "
		while data != "" and cnt > 0:
			data = self._cmd.read_file("%s/%s/%s" % (self._cgroup_mount_point, cgroup, "tasks"),
				err_ret = " ", no_error = True)
			if data not in ["", " "]:
				for l in data.split("\n"):
					self._cmd.write_to_file("%s/%s" % (self._cgroup_mount_point, "tasks"), l, no_error = True)
			cnt -= 1
		if cnt == 0:
			log.warn("Unable to cleanup tasks from cgroup '%s'" % cgroup)

	def _cgroup_cleanup_tasks(self):
		if self._cgroup is not None and not self._cgroup in self._cgroups:
			self._cgroup_cleanup_tasks_one(self._cgroup)
		for cg in self._cgroups:
			self._cgroup_cleanup_tasks_one(cg)

	def _instance_unapply_static(self, instance, rollback = consts.ROLLBACK_SOFT):
		super(SchedulerPlugin, self)._instance_unapply_static(instance, rollback)
		if self._daemon and instance._runtime_tuning:
			instance._terminate.set()
			instance._thread.join()
		self._restore_ps_affinity()
		self._cgroup_restore_affinity()
		self._cgroup_cleanup_tasks()
		if self._cgroup_groups_init or self._cgroup_mount_point_init:
			self._cgroup_finalize_groups()
		if self._cgroup_mount_point_init:
			self._cgroup_finalize()

	def _cgroup_verify_affinity_one(self, cgroup, affinity):
		log.debug("Verifying cgroup '%s' affinity" % cgroup)
		path = "%s/%s/%s" % (self._cgroup_mount_point, cgroup, "cpuset.cpus")
		current_affinity = self._cmd.read_file(path, err_ret = "ERR", no_error = True)
		if current_affinity == "ERR":
			return True
		current_affinity = self._cmd.cpulist2string(self._cmd.cpulist_pack(current_affinity))
		affinity = self._cmd.cpulist2string(self._cmd.cpulist_pack(affinity))
		affinity_description = "cgroup '%s' affinity" % cgroup
		if current_affinity == affinity:
			log.info(consts.STR_VERIFY_PROFILE_VALUE_OK
					% (affinity_description, current_affinity))
			return True
		else:
			log.error(consts.STR_VERIFY_PROFILE_VALUE_FAIL
					% (affinity_description, current_affinity,
					affinity))
			return False

	def _cgroup_verify_affinity(self):
		log.debug("Veryfying cgroups affinities")
		ret = True
		if self._affinity is not None and self._cgroup is not None and not self._cgroup in self._cgroups:
			ret = ret and self._cgroup_verify_affinity_one(self._cgroup, self._affinity)
		for cg in self._cgroups.items():
			ret = ret and self._cgroup_verify_affinity_one(cg[0], cg[1])
		return ret

	def _instance_verify_static(self, instance, ignore_missing, devices):
		ret1 = super(SchedulerPlugin, self)._instance_verify_static(instance, ignore_missing, devices)
		ret2 = self._cgroup_verify_affinity()
		return ret1 and ret2

	def _add_pid(self, instance, pid, r):
		try:
			cmd = self._get_cmdline(pid)
		except (OSError, IOError) as e:
			if e.errno == errno.ENOENT \
					or e.errno == errno.ESRCH:
				log.debug("Failed to get cmdline of PID %d, the task vanished."
						% pid)
			else:
				log.error("Failed to get cmdline of PID %d: %s"
						% (pid, e))
			return
		v = self._cmd.re_lookup(instance._sched_lookup, cmd, r)
		if v is not None and not pid in self._scheduler_original:
			log.debug("tuning new process '%s' with PID '%d' by '%s'" % (cmd, pid, str(v)))
			(sched, prio, affinity) = v
			self._tune_process(pid, cmd, sched, prio,
					affinity)
			self._storage.set(self._scheduler_storage_key,
					self._scheduler_original)

	def _remove_pid(self, instance, pid):
		if pid in self._scheduler_original:
			del self._scheduler_original[pid]
			log.debug("removed PID %d from the rollback database" % pid)
			self._storage.set(self._scheduler_storage_key,
					self._scheduler_original)

	def _thread_code(self, instance):
		r = self._cmd.re_lookup_compile(instance._sched_lookup)
		poll = select.poll()
		# Store the file objects in a local variable so that they don't
		# go out of scope too soon. This is a workaround for
		# python3-perf bug rhbz#1659445.
		fds = instance._evlist.get_pollfd()
		for fd in fds:
			poll.register(fd)
		while not instance._terminate.is_set():
			# timeout to poll in milliseconds
			if len(poll.poll(self._sleep_interval * 1000)) > 0 and not instance._terminate.is_set():
				read_events = True
				while read_events:
					read_events = False
					for cpu in self._cpus:
						event = instance._evlist.read_on_cpu(cpu)
						if event:
							read_events = True
							if event.type == perf.RECORD_COMM or \
								(self._perf_process_fork_value and event.type == perf.RECORD_FORK):
								self._add_pid(instance, int(event.tid), r)
							elif event.type == perf.RECORD_EXIT:
								self._remove_pid(instance, int(event.tid))

	@command_custom("cgroup_ps_blacklist", per_device = False)
	def _cgroup_ps_blacklist(self, enabling, value, verify, ignore_missing):
		# currently unsupported
		if verify:
			return None
		if enabling and value is not None:
			self._cgroup_ps_blacklist_re = "|".join(["(%s)" % v for v in re.split(r"(?<!\\);", str(value))])

	@command_custom("ps_whitelist", per_device = False)
	def _ps_whitelist(self, enabling, value, verify, ignore_missing):
		# currently unsupported
		if verify:
			return None
		if enabling and value is not None:
			self._ps_whitelist = "|".join(["(%s)" % v for v in re.split(r"(?<!\\);", str(value))])

	@command_custom("ps_blacklist", per_device = False)
	def _ps_blacklist(self, enabling, value, verify, ignore_missing):
		# currently unsupported
		if verify:
			return None
		if enabling and value is not None:
			self._ps_blacklist = "|".join(["(%s)" % v for v in re.split(r"(?<!\\);", str(value))])

	@command_custom("irq_process", per_device = False)
	def _irq_process(self, enabling, value, verify, ignore_missing):
		# currently unsupported
		if verify:
			return None
		if enabling and value is not None:
			self._irq_process = self._cmd.get_bool(value) == "1"

	@command_custom("default_irq_smp_affinity", per_device = False)
	def _default_irq_smp_affinity(self, enabling, value, verify, ignore_missing):
		# currently unsupported
		if verify:
			return None
		if enabling and value is not None:
			if value in ["calc", "ignore"]:
				self._default_irq_smp_affinity_value = value
			else:
				self._default_irq_smp_affinity_value = self._cmd.cpulist_unpack(value)

	@command_custom("perf_process_fork", per_device = False)
	def _perf_process_fork(self, enabling, value, verify, ignore_missing):
		# currently unsupported
		if verify:
			return None
		if enabling and value is not None:
			self._perf_process_fork_value = self._cmd.get_bool(value) == "1"

	# Raises OSError
	# Raises SystemError with old (pre-0.4) python-schedutils
	# instead of OSError
	# If PID doesn't exist, errno == ESRCH
	def _get_affinity(self, pid):
		res = self._scheduler_utils.get_affinity(pid)
		log.debug("Read affinity '%s' of PID %d" % (res, pid))
		return res

	def _set_affinity(self, pid, affinity):
		log.debug("Setting CPU affinity of PID %d to '%s'." % (pid, affinity))
		try:
			self._scheduler_utils.set_affinity(pid, affinity)
			return True
		# Workaround for old python-schedutils (pre-0.4) which
		# incorrectly raised SystemError instead of OSError
		except (SystemError, OSError) as e:
			if hasattr(e, "errno") and e.errno == errno.ESRCH:
				log.debug("Failed to set affinity of PID %d, the task vanished."
						% pid)
			else:
				res = self._affinity_changeable(pid)
				if res == 1 or res == -2:
					log.error("Failed to set affinity of PID %d to '%s': %s"
							% (pid, affinity, e))
			return False

	# returns intersection of affinity1 with affinity2, if intersection is empty it returns affinity3
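	# e.g. affinity1=[0,1,2], affinity2=[2,3] gives [2]; affinity1=[0],
	# affinity2=[1] has an empty intersection, so affinity3 is returned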
	def _get_intersect_affinity(self, affinity1, affinity2, affinity3):
		aff = set(affinity1).intersection(set(affinity2))
		if aff:
			return list(aff)
		return affinity3

	def _set_all_obj_affinity(self, objs, affinity, threads = False):
		psl = [v for v in objs if re.search(self._ps_whitelist,
				self._get_stat_comm(v)) is not None]
		if self._ps_blacklist != "":
			psl = [v for v in psl if re.search(self._ps_blacklist,
					self._get_stat_comm(v)) is None]
		if self._cgroup_ps_blacklist_re != "":
			psl = [v for v in psl if re.search(self._cgroup_ps_blacklist_re,
					self._get_stat_cgroup(v)) is None]
		psd = dict([(v.pid, v) for v in psl])
		for pid in psd:
			try:
				cmd = self._get_cmdline(psd[pid])
			except (OSError, IOError) as e:
				if e.errno == errno.ENOENT \
						or e.errno == errno.ESRCH:
					log.debug("Failed to get cmdline of PID %d, the task vanished."
							% pid)
				else:
					log.error("Refusing to set affinity of PID %d, failed to get its cmdline: %s"
							% (pid, e))
				continue
			cont = self._tune_process_affinity(pid, affinity,
					intersect = True)
			if not cont:
				continue
			if pid in self._scheduler_original:
				self._scheduler_original[pid].cmdline = cmd
			# process threads
			if not threads and "threads" in psd[pid]:
				self._set_all_obj_affinity(
						psd[pid]["threads"].values(),
						affinity, True)

	def _get_stat_cgroup(self, o):
		try:
			return o["cgroups"]
		except (OSError, IOError, KeyError):
			return ""

	def _get_stat_comm(self, o):
		try:
			return o["stat"]["comm"]
		except (OSError, IOError, KeyError):
			return ""

	def _set_ps_affinity(self, affinity):
		try:
			ps = procfs.pidstats()
			ps.reload_threads()
			self._set_all_obj_affinity(ps.values(), affinity, False)
		except (OSError, IOError) as e:
			log.error("error applying tuning, cannot get information about running processes: %s"
					% e)

	# Returns 0 on success, -2 if changing the affinity is not
	# supported, -1 if some other error occurs.
	def _set_irq_affinity(self, irq, affinity, restoring):
		try:
			affinity_hex = self._cmd.cpulist2hex(affinity)
			log.debug("Setting SMP affinity of IRQ %s to '%s'"
					% (irq, affinity_hex))
			filename = "/proc/irq/%s/smp_affinity" % irq
			with open(filename, "w") as f:
				f.write(affinity_hex)
			return 0
		except (OSError, IOError) as e:
			# EIO is returned by
			# kernel/irq/proc.c:write_irq_affinity() if changing
			# the affinity is not supported
			# (at least on kernels 3.10 and 4.18)
			if hasattr(e, "errno") and e.errno == errno.EIO \
					and not restoring:
				log.debug("Setting SMP affinity of IRQ %s is not supported"
						% irq)
				return -2
			else:
				log.error("Failed to set SMP affinity of IRQ %s to '%s': %s"
						% (irq, affinity_hex, e))
				return -1

	def _set_default_irq_affinity(self, affinity):
		try:
			affinity_hex = self._cmd.cpulist2hex(affinity)
			log.debug("Setting default SMP IRQ affinity to '%s'"
					% affinity_hex)
			with open("/proc/irq/default_smp_affinity", "w") as f:
				f.write(affinity_hex)
		except (OSError, IOError) as e:
			log.error("Failed to set default SMP IRQ affinity to '%s': %s"
					% (affinity_hex, e))

	def _set_all_irq_affinity(self, affinity):
		irq_original = IRQAffinities()
		irqs = procfs.interrupts()
		for irq in irqs.keys():
			try:
				prev_affinity = irqs[irq]["affinity"]
				log.debug("Read affinity of IRQ '%s': '%s'"
						% (irq, prev_affinity))
			except KeyError:
				continue
			_affinity = self._get_intersect_affinity(prev_affinity, affinity, affinity)
			if set(_affinity) == set(prev_affinity):
				continue
			res = self._set_irq_affinity(irq, _affinity, False)
			if res == 0:
				irq_original.irqs[irq] = prev_affinity
			elif res == -2:
				irq_original.unchangeable.append(irq)

		# default affinity
		prev_affinity_hex = self._cmd.read_file("/proc/irq/default_smp_affinity")
		prev_affinity = self._cmd.hex2cpulist(prev_affinity_hex)
		if self._default_irq_smp_affinity_value == "calc":
			_affinity = self._get_intersect_affinity(prev_affinity, affinity, affinity)
		elif self._default_irq_smp_affinity_value != "ignore":
			_affinity = self._default_irq_smp_affinity_value
		if self._default_irq_smp_affinity_value != "ignore":
			self._set_default_irq_affinity(_affinity)
			irq_original.default = prev_affinity
		self._storage.set(self._irq_storage_key, irq_original)

	def _restore_all_irq_affinity(self):
		irq_original = self._storage.get(self._irq_storage_key, None)
		if irq_original is None:
			return
		for irq, affinity in irq_original.irqs.items():
			self._set_irq_affinity(irq, affinity, True)
		if self._default_irq_smp_affinity_value != "ignore":
			affinity = irq_original.default
			self._set_default_irq_affinity(affinity)
		self._storage.unset(self._irq_storage_key)

	def _verify_irq_affinity(self, irq_description, correct_affinity,
			current_affinity):
		res = set(current_affinity).issubset(set(correct_affinity))
		if res:
			log.info(consts.STR_VERIFY_PROFILE_VALUE_OK
					% (irq_description, current_affinity))
		else:
			log.error(consts.STR_VERIFY_PROFILE_VALUE_FAIL
					% (irq_description, current_affinity,
					correct_affinity))
		return res

	def _verify_all_irq_affinity(self, correct_affinity, ignore_missing):
		irq_original = self._storage.get(self._irq_storage_key, None)
		irqs = procfs.interrupts()
		res = True
		for irq in irqs.keys():
			if irq in irq_original.unchangeable and ignore_missing:
				description = "IRQ %s does not support changing SMP affinity" % irq
				log.info(consts.STR_VERIFY_PROFILE_VALUE_MISSING % description)
				continue
			try:
				current_affinity = irqs[irq]["affinity"]
				log.debug("Read SMP affinity of IRQ '%s': '%s'"
						% (irq, current_affinity))
				irq_description = "SMP affinity of IRQ %s" % irq
				if not self._verify_irq_affinity(
						irq_description,
						correct_affinity,
						current_affinity):
					res = False
			except KeyError:
				continue

		current_affinity_hex = self._cmd.read_file(
				"/proc/irq/default_smp_affinity")
		current_affinity = self._cmd.hex2cpulist(current_affinity_hex)
		if self._default_irq_smp_affinity_value != "ignore" and not self._verify_irq_affinity("default IRQ SMP affinity",
				current_affinity, correct_affinity if self._default_irq_smp_affinity_value == "calc" else
				self._default_irq_smp_affinity_value):
			res = False
		return res

	@command_custom("isolated_cores", per_device = False, priority = 10)
	def _isolated_cores(self, enabling, value, verify, ignore_missing):
		affinity = None
		self._affinity = None
		if value is not None:
			isolated = set(self._cmd.cpulist_unpack(value))
			present = set(self._cpus)
			if isolated.issubset(present):
				affinity = list(present - isolated)
				self._affinity = self._cmd.cpulist2string(affinity)
			else:
				str_cpus = self._cmd.cpulist2string(self._cpus)
				log.error("Invalid isolated_cores specified, '%s' does not match available cores '%s'"
						% (value, str_cpus))
		if (enabling or verify) and affinity is None:
			return None
		# currently only IRQ affinity verification is supported
		if verify:
			if self._irq_process:
				return self._verify_all_irq_affinity(affinity, ignore_missing)
			return True
		elif enabling:
			if self._cgroup:
				self._cgroup_set_affinity()
				ps_affinity = "cgroup.%s" % self._cgroup
			else:
				ps_affinity = affinity
			self._set_ps_affinity(ps_affinity)
			if self._irq_process:
				self._set_all_irq_affinity(affinity)
		else:
			# Restoring processes' affinity is done in
			# _instance_unapply_static()
			if self._irq_process:
				self._restore_all_irq_affinity()

	def _get_sched_knob_path(self, prefix, namespace, knob):
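		# e.g. ("", "sched", "migration_cost_ns") resolves to
		# /proc/sys/kernel/sched_migration_cost_ns on older kernels or to
		# /sys/kernel/debug/sched/migration_cost_ns on newer ones;
		# ("sched", "numa_balancing", "scan_delay_ms") resolves to
		# /proc/sys/kernel/numa_balancing_scan_delay_ms or to
		# /sys/kernel/debug/sched/numa_balancing/scan_delay_ms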
		key = "%s_%s_%s" % (prefix, namespace, knob)
		path = self._sched_knob_paths_cache.get(key)
		if path:
			return path
		path = "/proc/sys/kernel/%s_%s" % (namespace, knob)
		if not os.path.exists(path):
			if prefix == "":
				path = "%s/%s" % (namespace, knob)
			else:
				path = "%s/%s/%s" % (prefix, namespace, knob)
			path = "/sys/kernel/debug/%s" % path
			if self._secure_boot_hint is None:
				self._secure_boot_hint = True
		self._sched_knob_paths_cache[key] = path
		return path

	def _get_sched_knob(self, prefix, namespace, knob):
		data = self._cmd.read_file(self._get_sched_knob_path(prefix, namespace, knob), err_ret = None)
		if data is None:
			log.error("Error reading '%s'" % knob)
			if self._secure_boot_hint:
				log.error("This may not work with Secure Boot or kernel_lockdown (this hint is logged only once)")
				self._secure_boot_hint = False
		return data

	def _set_sched_knob(self, prefix, namespace, knob, value, sim, remove = False):
		if value is None:
			return None
		if not sim:
			if not self._cmd.write_to_file(self._get_sched_knob_path(prefix, namespace, knob), value, \
				no_error = [errno.ENOENT] if remove else False):
					log.error("Error writing value '%s' to '%s'" % (value, knob))
		return value

	@command_get("sched_min_granularity_ns")
	def _get_sched_min_granularity_ns(self):
		return self._get_sched_knob("", "sched", "min_granularity_ns")

	@command_set("sched_min_granularity_ns")
	def _set_sched_min_granularity_ns(self, value, sim, remove):
		return self._set_sched_knob("", "sched", "min_granularity_ns", value, sim, remove)

	@command_get("sched_latency_ns")
	def _get_sched_latency_ns(self):
		return self._get_sched_knob("", "sched", "latency_ns")

	@command_set("sched_latency_ns")
	def _set_sched_latency_ns(self, value, sim, remove):
		return self._set_sched_knob("", "sched", "latency_ns", value, sim, remove)

	@command_get("sched_wakeup_granularity_ns")
	def _get_sched_wakeup_granularity_ns(self):
		return self._get_sched_knob("", "sched", "wakeup_granularity_ns")

	@command_set("sched_wakeup_granularity_ns")
	def _set_sched_wakeup_granularity_ns(self, value, sim, remove):
		return self._set_sched_knob("", "sched", "wakeup_granularity_ns", value, sim, remove)

	@command_get("sched_tunable_scaling")
	def _get_sched_tunable_scaling(self):
		return self._get_sched_knob("", "sched", "tunable_scaling")

	@command_set("sched_tunable_scaling")
	def _set_sched_tunable_scaling(self, value, sim, remove):
		return self._set_sched_knob("", "sched", "tunable_scaling", value, sim, remove)

	@command_get("sched_migration_cost_ns")
	def _get_sched_migration_cost_ns(self):
		return self._get_sched_knob("", "sched", "migration_cost_ns")

	@command_set("sched_migration_cost_ns")
	def _set_sched_migration_cost_ns(self, value, sim, remove):
		return self._set_sched_knob("", "sched", "migration_cost_ns", value, sim, remove)

	@command_get("sched_nr_migrate")
	def _get_sched_nr_migrate(self):
		return self._get_sched_knob("", "sched", "nr_migrate")

	@command_set("sched_nr_migrate")
	def _set_sched_nr_migrate(self, value, sim, remove):
		return self._set_sched_knob("", "sched", "nr_migrate", value, sim, remove)

	@command_get("numa_balancing_scan_delay_ms")
	def _get_numa_balancing_scan_delay_ms(self):
		return self._get_sched_knob("sched", "numa_balancing", "scan_delay_ms")

	@command_set("numa_balancing_scan_delay_ms")
	def _set_numa_balancing_scan_delay_ms(self, value, sim, remove):
		return self._set_sched_knob("sched", "numa_balancing", "scan_delay_ms", value, sim, remove)

	@command_get("numa_balancing_scan_period_min_ms")
	def _get_numa_balancing_scan_period_min_ms(self):
		return self._get_sched_knob("sched", "numa_balancing", "scan_period_min_ms")

	@command_set("numa_balancing_scan_period_min_ms")
	def _set_numa_balancing_scan_period_min_ms(self, value, sim, remove):
		return self._set_sched_knob("sched", "numa_balancing", "scan_period_min_ms", value, sim, remove)

	@command_get("numa_balancing_scan_period_max_ms")
	def _get_numa_balancing_scan_period_max_ms(self):
		return self._get_sched_knob("sched", "numa_balancing", "scan_period_max_ms")

	@command_set("numa_balancing_scan_period_max_ms")
	def _set_numa_balancing_scan_period_max_ms(self, value, sim, remove):
		return self._set_sched_knob("sched", "numa_balancing", "scan_period_max_ms", value, sim, remove)

	@command_get("numa_balancing_scan_size_mb")
	def _get_numa_balancing_scan_size_mb(self):
		return self._get_sched_knob("sched", "numa_balancing", "scan_size_mb")

	@command_set("numa_balancing_scan_size_mb")
	def _set_numa_balancing_scan_size_mb(self, value, sim, remove):
		return self._set_sched_knob("sched", "numa_balancing", "scan_size_mb", value, sim, remove)
plugins/plugin_modules.py000064400000011475150510354010011627 0ustar00
import re
import os.path
from . import base
from .decorators import *
import tuned.logs
from subprocess import *
from tuned.utils.commands import commands
import tuned.consts as consts

log = tuned.logs.get()

class ModulesPlugin(base.Plugin):
	"""
	`modules`::
	
	Plug-in for applying custom kernel module options.
	+
	This plug-in can set parameters of kernel modules. It creates
	the `/etc/modprobe.d/tuned.conf` file. The syntax is
	`_module_=_option1=value1 option2=value2..._` where `_module_` is
	the module name and `_optionx=valuex_` are module options which may
	or may not be present.
	+
	.Load module `netrom` with module parameter `nr_ndevs=2`
	====
	----
	[modules]
	netrom=nr_ndevs=2
	----
	====
	Modules can also be forced to load/reload by using an additional
	`+r` option prefix.
	+
	.(Re)load module `netrom` with module parameter `nr_ndevs=2`
	====
	----
	[modules]
	netrom=+r nr_ndevs=2
	----
	====
	The `+r` switch will also cause *TuneD* to try to remove the `netrom`
	module (if loaded) and to (re)insert it with the specified
	parameters. The `+r` can be followed by an optional comma (`+r,`)
	for better readability.
	+
	When using `+r`, the module will be loaded immediately by the *TuneD*
	daemon itself rather than waiting for the OS to load it with the
	specified parameters.
	"""

	def __init__(self, *args, **kwargs):
		super(ModulesPlugin, self).__init__(*args, **kwargs)
		self._has_dynamic_options = True
		self._cmd = commands()

	def _instance_init(self, instance):
		instance._has_dynamic_tuning = False
		instance._has_static_tuning = True
		instance._modules = instance.options

	def _instance_cleanup(self, instance):
		pass

	def _reload_modules(self, modules):
		for module in modules:
			retcode, out = self._cmd.execute(["modprobe", "-r", module])
			if retcode < 0:
				log.warn("'modprobe' command not found, cannot reload kernel modules, reboot is required")
				return
			elif retcode > 0:
				log.debug("cannot remove kernel module '%s': %s" % (module, out.strip()))
			retcode, out = self._cmd.execute(["modprobe", module])
			if retcode != 0:
				log.warn("cannot insert/reinsert module '%s', reboot is required: %s" % (module, out.strip()))

	def _instance_apply_static(self, instance):
		self._clear_modprobe_file()
		s = ""
		retcode = 0
		skip_check = False
		reload_list = []
		for option, value in list(instance._modules.items()):
			module = self._variables.expand(option)
			v = self._variables.expand(value)
			if not skip_check:
				retcode, out = self._cmd.execute(["modinfo", module])
				if retcode < 0:
					skip_check = True
					log.warn("'modinfo' command not found, not checking kernel modules")
				elif retcode > 0:
					log.error("kernel module '%s' not found, skipping it" % module)
			if skip_check or retcode == 0:
				if len(v) > 1 and v[0:2] == "+r":
					v = re.sub(r"^\s*\+r\s*,?\s*", "", v)
					reload_list.append(module)
				if len(v) > 0:
					s += "options " + module + " " + v + "\n"
				else:
					log.debug("module '%s' doesn't have any option specified, not writing it to modprobe.d" % module)
		self._cmd.write_to_file(consts.MODULES_FILE, s)
		l = len(reload_list)
		if l > 0:
			self._reload_modules(reload_list)
			if len(instance._modules) != l:
				log.info(consts.STR_HINT_REBOOT)

	def _unquote_path(self, path):
		return str(path).replace("/", "")

	def _instance_verify_static(self, instance, ignore_missing, devices):
		ret = True
		# not all modules export all their parameters through sysfs, so hardcode the check with ignore_missing
		ignore_missing = True
		r = re.compile(r"\s+")
		for option, value in list(instance._modules.items()):
			module = self._variables.expand(option)
			v = self._variables.expand(value)
			v = re.sub(r"^\s*\+r\s*,?\s*", "", v)
			mpath = "/sys/module/%s" % module
			if not os.path.exists(mpath):
				ret = False
				log.error(consts.STR_VERIFY_PROFILE_FAIL % "module '%s' is not loaded" % module)
			else:
				log.info(consts.STR_VERIFY_PROFILE_OK % "module '%s' is loaded" % module)
				l = r.split(v)
				for item in l:
					arg = item.split("=")
					if len(arg) != 2:
						log.warn("unrecognized module option for module '%s': %s" % (module, item))
					else:
						if self._verify_value(arg[0], arg[1],
							self._cmd.read_file(mpath + "/parameters/" + self._unquote_path(arg[0]), err_ret = None, no_error = True),
							ignore_missing) == False:
								ret = False
		return ret

	def _instance_unapply_static(self, instance, rollback = consts.ROLLBACK_SOFT):
		if rollback == consts.ROLLBACK_FULL:
			self._clear_modprobe_file()

	def _clear_modprobe_file(self):
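		# keep only the leading block of comment lines in the modprobe.d
		# file and drop everything after it (i.e. the previously
		# generated 'options' lines)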
		s = self._cmd.read_file(consts.MODULES_FILE, no_error = True)
		l = s.split("\n")
		i = j = 0
		ll = len(l)
		r = re.compile(r"^\s*#")
		while i < ll:
			if r.search(l[i]) is None:
				j = i
				i = ll
			i += 1
		s = "\n".join(l[0:j])
		if len(s) > 0:
			s += "\n"
		self._cmd.write_to_file(consts.MODULES_FILE, s)
plugins/plugin_uncore.py000064400000011163150510354010011444 0ustar00
from . import hotplug
from .decorators import *
import tuned.logs
from tuned.utils.commands import commands

import os
import fnmatch

log = tuned.logs.get()
cmd = commands()

SYSFS_DIR = "/sys/devices/system/cpu/intel_uncore_frequency/"

IS_MIN = 0
IS_MAX = 1

class UncorePlugin(hotplug.Plugin):
	"""
	`uncore`::

	`max_freq_khz, min_freq_khz`:::
	Limit the maximum and minimum uncore frequency.

	These options are Intel-specific and correspond directly to the `sysfs`
	files exposed by the Intel uncore frequency driver.
	====
	----
	[uncore]
	max_freq_khz=4000000
	----
	Using this option, *TuneD* will limit the maximum frequency of all
	uncore units on an Intel system to 4 GHz.
	====
	"""

	def _init_devices(self):
		self._devices_supported = True
		self._assigned_devices = set()
		self._free_devices = set()
		self._is_tpmi = False

		try:
			devices = os.listdir(SYSFS_DIR)
		except OSError:
			return

		# For new TPMI interface use only uncore devices
		tpmi_devices = fnmatch.filter(devices, 'uncore*')
		if len(tpmi_devices) > 0:
			self._is_tpmi = True  # Not used at present but can be useful in the future
			devices = tpmi_devices

		for d in devices:
			self._free_devices.add(d)

		log.debug("devices: %s", str(self._free_devices))

	def _instance_init(self, instance):
		instance._has_static_tuning = True
		instance._has_dynamic_tuning = False

	def _instance_cleanup(self, instance):
		pass

	def _get(self, dev_dir, file):
		sysfs_file = SYSFS_DIR + dev_dir + "/" + file
		value = cmd.read_file(sysfs_file)
		if len(value) > 0:
			return int(value)
		return None

	def _set(self, dev_dir, file, value):
		sysfs_file = SYSFS_DIR + dev_dir + "/" + file
		if cmd.write_to_file(sysfs_file, "%u" % value):
			return value
		return None

	@classmethod
	def _get_config_options(cls):
		return {
			"max_freq_khz": None,
			"min_freq_khz": None,
		}

	def _validate_value(self, device, min_or_max, value):
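		# sanity-check the requested frequency against the current and
		# initial limits read from sysfs: values that would invert the
		# min/max ordering are rejected, values outside the initial
		# hardware range are capped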
		try:
			freq_khz = int(value)
		except ValueError:
			log.error("value '%s' is not integer" % value)
			return None

		try:
			initial_max_freq_khz = self._get(device, "initial_max_freq_khz")
			initial_min_freq_khz = self._get(device, "initial_min_freq_khz")
			max_freq_khz = self._get(device, "max_freq_khz")
			min_freq_khz = self._get(device, "min_freq_khz")
		except (OSError, IOError):
			log.error("fail to read inital uncore frequency values")
			return None

		if min_or_max == IS_MAX:
			if freq_khz < min_freq_khz:
				log.error("%s: max_freq_khz %d value below min_freq_khz %d" % (device, freq_khz, min_freq_khz))
				return None

			if freq_khz > initial_max_freq_khz:
				log.info("%s: max_freq_khz %d value above initial_max_freq_khz - capped to %d" % (device, freq_khz, initial_max_freq_khz))
				freq_khz = initial_max_freq_khz

		elif min_or_max == IS_MIN:
			if freq_khz > max_freq_khz:
				log.error("%s: min_freq_khz %d value above max_freq_khz %d" % (device, freq_khz, max_freq_khz))
				return None

			if freq_khz < initial_min_freq_khz:
				log.info("%s: min_freq_khz %d value below initial_max_freq_khz - capped to %d" % (device, freq_khz, initial_min_freq_khz))
				freq_khz = initial_min_freq_khz

		else:
			return None

		return freq_khz

	@command_set("max_freq_khz", per_device = True)
	def _set_max_freq_khz(self, value, device, sim, remove):
		max_freq_khz = self._validate_value(device, IS_MAX, value)
		if max_freq_khz is None:
			return None

		if sim:
			return max_freq_khz

		log.debug("%s: set max_freq_khz %d" % (device, max_freq_khz))
		return self._set(device, "max_freq_khz", max_freq_khz)

	@command_get("max_freq_khz")
	def _get_max_freq_khz(self, device, ignore_missing=False):
		if ignore_missing and not os.path.isdir(SYSFS_DIR):
			return None

		try:
			max_freq_khz = self._get(device, "max_freq_khz")
		except (OSError, IOError):
			log.error("fail to read uncore frequency values")
			return None

		log.debug("%s: get max_freq_khz %d" % (device, max_freq_khz))
		return max_freq_khz

	@command_set("min_freq_khz", per_device = True)
	def _set_min_freq_khz(self, value, device, sim, remove):
		min_freq_khz = self._validate_value(device, IS_MIN, value)
		if min_freq_khz is None:
			return None

		if sim:
			return min_freq_khz

		log.debug("%s: set min_freq_khz %d" % (device, min_freq_khz))
		return self._set(device, "min_freq_khz", min_freq_khz)

	@command_get("min_freq_khz")
	def _get_min_freq_khz(self, device, ignore_missing=False):
		if ignore_missing and not os.path.isdir(SYSFS_DIR):
			return None

		try:
			min_freq_khz = self._get(device, "min_freq_khz")
		except (OSError, IOError):
			log.error("fail to read uncore frequency values")
			return None

		log.debug("%s: get min_freq_khz %d" % (device, min_freq_khz))
		return min_freq_khz
plugins/decorators.py000064400000001727150510354010010745 0ustar00
__all__ = ["command_set", "command_get", "command_custom"]

#	@command_set("scheduler", per_device=True)
#	def set_scheduler(self, value, device):
#		set_new_scheduler
#
#	@command_get("scheduler")
#	def get_scheduler(self, device):
#		return current_scheduler
#
#	@command_set("foo")
#	def set_foo(self, value):
#		set_new_foo
#
#	@command_get("foo")
#	def get_foo(self):
#		return current_foo
#

def command_set(name, per_device=False, priority=0):
	def wrapper(method):
		method._command = {
			"set": True,
			"name": name,
			"per_device": per_device,
			"priority": priority,
		}
		return method

	return wrapper

def command_get(name):
	def wrapper(method):
		method._command = {
			"get": True,
			"name": name,
		}
		return method
	return wrapper

def command_custom(name, per_device=False, priority=0):
	def wrapper(method):
		method._command = {
			"custom": True,
			"name": name,
			"per_device": per_device,
			"priority": priority,
		}
		return method
	return wrapper
plugins/base.py000064400000053621150510354010007512 0ustar00
import re
import tuned.consts as consts
import tuned.profiles.variables
import tuned.logs
import collections
from tuned.utils.commands import commands
import os
from subprocess import Popen, PIPE

log = tuned.logs.get()

class Plugin(object):
	"""
	Base class for all plugins.

	Plugins change various system settings in order to get desired performance or power
	saving. Plugins use Monitor objects to get information from the running system.

	A lot of logic is intentionally included in the plugin to increase plugin flexibility.
	"""

	def __init__(self, monitors_repository, storage_factory, hardware_inventory, device_matcher, device_matcher_udev, instance_factory, global_cfg, variables):
		"""Plugin constructor."""

		self._storage = storage_factory.create(self.__class__.__name__)
		self._monitors_repository = monitors_repository
		self._hardware_inventory = hardware_inventory
		self._device_matcher = device_matcher
		self._device_matcher_udev = device_matcher_udev
		self._instance_factory = instance_factory

		self._instances = collections.OrderedDict()
		self._init_commands()

		self._global_cfg = global_cfg
		self._variables = variables
		self._has_dynamic_options = False
		self._devices_inited = False

		self._options_used_by_dynamic = self._get_config_options_used_by_dynamic()

		self._cmd = commands()

	def cleanup(self):
		self.destroy_instances()

	def init_devices(self):
		if not self._devices_inited:
			self._init_devices()
			self._devices_inited = True

	@property
	def name(self):
		return self.__class__.__module__.split(".")[-1].split("_", 1)[1]

	#
	# Plugin configuration manipulation and helpers.
	#

	@classmethod
	def _get_config_options(self):
		"""Default configuration options for the plugin."""
		return {}

	@classmethod
	def get_config_options_hints(cls):
		"""Explanation of each config option function"""
		return {}

	@classmethod
	def _get_config_options_used_by_dynamic(self):
		"""List of config options used by dynamic tuning. Their previous values will be automatically saved and restored."""
		return []

	def _get_effective_options(self, options):
		"""Merge provided options with plugin default options."""
		# TODO: _has_dynamic_options is a hack
		effective = self._get_config_options().copy()
		for key in options:
			if key in effective or self._has_dynamic_options:
				effective[key] = options[key]
			else:
				log.warn("Unknown option '%s' for plugin '%s'." % (key, self.__class__.__name__))
		return effective

	def _option_bool(self, value):
		if type(value) is bool:
			return value
		value = str(value).lower()
		return value == "true" or value == "1"

	#
	# Interface for manipulation with instances of the plugin.
	#

	def create_instance(self, name, devices_expression, devices_udev_regex, script_pre, script_post, options):
		"""Create new instance of the plugin and seize the devices."""
		if name in self._instances:
			raise Exception("Plugin instance with name '%s' already exists." % name)

		effective_options = self._get_effective_options(options)
		instance = self._instance_factory.create(self, name, devices_expression, devices_udev_regex, \
			script_pre, script_post, effective_options)
		self._instances[name] = instance

		return instance

	def destroy_instance(self, instance):
		"""Destroy existing instance."""
		if instance._plugin != self:
			raise Exception("Plugin instance '%s' does not belong to this plugin '%s'." % (instance, self))
		if instance.name not in self._instances:
			raise Exception("Plugin instance '%s' was already destroyed." % instance)

		instance = self._instances[instance.name]
		self._destroy_instance(instance)
		del self._instances[instance.name]

	def initialize_instance(self, instance):
		"""Initialize an instance."""
		log.debug("initializing instance %s (%s)" % (instance.name, self.name))
		self._instance_init(instance)

	def destroy_instances(self):
		"""Destroy all instances."""
		for instance in list(self._instances.values()):
			log.debug("destroying instance %s (%s)" % (instance.name, self.name))
			self._destroy_instance(instance)
		self._instances.clear()

	def _destroy_instance(self, instance):
		self.release_devices(instance)
		self._instance_cleanup(instance)

	def _instance_init(self, instance):
		raise NotImplementedError()

	def _instance_cleanup(self, instance):
		raise NotImplementedError()

	#
	# Devices handling
	#

	def _init_devices(self):
		self._devices_supported = False
		self._assigned_devices = set()
		self._free_devices = set()

	def _get_device_objects(self, devices):
		"""Override this in a subclass to transform a list of device names (e.g. ['sda'])
		   to a list of pyudev.Device objects, if your plugin supports it"""
		return None

	def _get_matching_devices(self, instance, devices):
		if instance.devices_udev_regex is None:
			return set(self._device_matcher.match_list(instance.devices_expression, devices))
		else:
			udev_devices = self._get_device_objects(devices)
			if udev_devices is None:
				log.error("Plugin '%s' does not support the 'devices_udev_regex' option", self.name)
				return set()
			udev_devices = self._device_matcher_udev.match_list(instance.devices_udev_regex, udev_devices)
			return set([x.sys_name for x in udev_devices])
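
	# Sketch (assuming a glob-style devices_expression matcher): with free
	# devices {"sda", "sdb", "dm-0"} and devices_expression "sd*", the
	# matching set would be {"sda", "sdb"}; with a devices_udev_regex, the
	# regex is instead matched against udev device objects.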

	def assign_free_devices(self, instance):
		if not self._devices_supported:
			return

		log.debug("assigning devices to instance %s" % instance.name)
		to_assign = self._get_matching_devices(instance, self._free_devices)
		instance.active = len(to_assign) > 0
		if not instance.active:
			log.warn("instance %s: no matching devices available" % instance.name)
		else:
			name = instance.name
			if instance.name != self.name:
				name += " (%s)" % self.name
			log.info("instance %s: assigning devices %s" % (name, ", ".join(to_assign)))
			# cannot use |= here: augmented assignment would rebind the
			# attribute, which fails if assigned_devices is a read-only property
			instance.assigned_devices.update(to_assign)
			self._assigned_devices |= to_assign
			self._free_devices -= to_assign

	def release_devices(self, instance):
		if not self._devices_supported:
			return

		to_release = (instance.processed_devices \
				| instance.assigned_devices) \
				& self._assigned_devices

		instance.active = False
		instance.processed_devices.clear()
		instance.assigned_devices.clear()
		self._assigned_devices -= to_release
		self._free_devices |= to_release

	#
	# Tuning activation and deactivation.
	#

	def _run_for_each_device(self, instance, callback, devices):
		if not self._devices_supported:
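			# plugins without device support invoke the callback exactly once,
			# with device=None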
			devices = [None, ]

		for device in devices:
			callback(instance, device)

	def _instance_pre_static(self, instance, enabling):
		pass

	def _instance_post_static(self, instance, enabling):
		pass

	def _call_device_script(self, instance, script, op, devices, rollback = consts.ROLLBACK_SOFT):
		if script is None:
			return None
		if len(devices) == 0:
			log.warn("Instance '%s': no device to call script '%s' for." % (instance.name, script))
			return None
		if not script.startswith("/"):
			log.error("Relative paths cannot be used in script_pre or script_post. " \
				+ "Use ${i:PROFILE_DIR}.")
			return False
		dir_name = os.path.dirname(script)
		ret = True
		for dev in devices:
			# copy the environment so that per-script variables do not leak
			# into the daemon's own environment
			environ = os.environ.copy()
			environ.update(self._variables.get_env())
			arguments = [op]
			if rollback == consts.ROLLBACK_FULL:
				arguments.append("full_rollback")
			arguments.append(dev)
			log.info("calling script '%s' with arguments '%s'" % (script, str(arguments)))
			log.debug("using environment '%s'" % str(list(environ.items())))
			try:
				proc = Popen([script] + arguments,
						stdout=PIPE, stderr=PIPE,
						close_fds=True, env=environ,
						cwd=dir_name, universal_newlines=True)
				out, err = proc.communicate()
				if proc.returncode:
					log.error("script '%s' error: %d, '%s'" % (script, proc.returncode, err[:-1]))
					ret = False
			except (OSError,IOError) as e:
				log.error("script '%s' error: %s" % (script, e))
				ret = False
		return ret
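
	# Example invocation (hypothetical paths): for script_pre
	# "/usr/lib/tuned/myprofile/script.sh" and device "sda", the script is
	# executed as "script.sh apply sda" on apply, and as
	# "script.sh unapply full_rollback sda" when a full rollback is requested.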

	def instance_apply_tuning(self, instance):
		"""
		Apply static and dynamic tuning if the plugin instance is active.
		"""
		if not instance.active:
			return

		if instance.has_static_tuning:
			self._call_device_script(instance, instance.script_pre,
					"apply", instance.assigned_devices)
			self._instance_pre_static(instance, True)
			self._instance_apply_static(instance)
			self._instance_post_static(instance, True)
			self._call_device_script(instance, instance.script_post,
					"apply", instance.assigned_devices)
		if instance.has_dynamic_tuning and self._global_cfg.get(consts.CFG_DYNAMIC_TUNING, consts.CFG_DEF_DYNAMIC_TUNING):
			self._run_for_each_device(instance, self._instance_apply_dynamic, instance.assigned_devices)
		instance.processed_devices.update(instance.assigned_devices)
		instance.assigned_devices.clear()

	def instance_verify_tuning(self, instance, ignore_missing):
		"""
		Verify static tuning if the plugin instance is active.
		"""
		if not instance.active:
			return None

		if len(instance.assigned_devices) != 0:
			log.error("BUG: Some devices have not been tuned: %s"
					% ", ".join(instance.assigned_devices))
		devices = instance.processed_devices.copy()
		if instance.has_static_tuning:
			if self._call_device_script(instance, instance.script_pre, "verify", devices) == False:
				return False
			if self._instance_verify_static(instance, ignore_missing, devices) == False:
				return False
			if self._call_device_script(instance, instance.script_post, "verify", devices) == False:
				return False
			return True
		else:
			return None

	def instance_update_tuning(self, instance):
		"""
		Apply dynamic tuning if the plugin instance is active.
		"""
		if not instance.active:
			return
		if instance.has_dynamic_tuning and self._global_cfg.get(consts.CFG_DYNAMIC_TUNING, consts.CFG_DEF_DYNAMIC_TUNING):
			self._run_for_each_device(instance, self._instance_update_dynamic, instance.processed_devices.copy())

	def instance_unapply_tuning(self, instance, rollback = consts.ROLLBACK_SOFT):
		"""
		Remove all tunings applied by the plugin instance.
		"""
		if rollback == consts.ROLLBACK_NONE:
			return

		if instance.has_dynamic_tuning and self._global_cfg.get(consts.CFG_DYNAMIC_TUNING, consts.CFG_DEF_DYNAMIC_TUNING):
			self._run_for_each_device(instance, self._instance_unapply_dynamic, instance.processed_devices)
		if instance.has_static_tuning:
			self._call_device_script(instance, instance.script_post,
					"unapply", instance.processed_devices,
					rollback = rollback)
			self._instance_pre_static(instance, False)
			self._instance_unapply_static(instance, rollback)
			self._instance_post_static(instance, False)
			self._call_device_script(instance, instance.script_pre, "unapply", instance.processed_devices, rollback = rollback)
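
	# Typical lifecycle of a single instance, as driven by the daemon
	# (a sketch based on the methods above):
	#
	#	plugin.init_devices()
	#	instance = plugin.create_instance("disk", "sd*", None, None, None, {})
	#	plugin.initialize_instance(instance)
	#	plugin.assign_free_devices(instance)
	#	plugin.instance_apply_tuning(instance)
	#	...						# periodic instance_update_tuning()
	#	plugin.instance_unapply_tuning(instance)
	#	plugin.destroy_instance(instance)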

	def _instance_apply_static(self, instance):
		self._execute_all_non_device_commands(instance)
		self._execute_all_device_commands(instance, instance.assigned_devices)

	def _instance_verify_static(self, instance, ignore_missing, devices):
		ret = True
		if self._verify_all_non_device_commands(instance, ignore_missing) == False:
			ret = False
		if self._verify_all_device_commands(instance, devices, ignore_missing) == False:
			ret = False
		return ret

	def _instance_unapply_static(self, instance, rollback = consts.ROLLBACK_SOFT):
		self._cleanup_all_device_commands(instance,
				instance.processed_devices)
		self._cleanup_all_non_device_commands(instance)

	def _instance_apply_dynamic(self, instance, device):
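		# save the original value of each option used by dynamic tuning
		# (unless one was saved earlier) so that it can be restored on unapply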
		for option in self._options_used_by_dynamic:
			if self._storage_get(instance, self._commands[option], device) is None:
				self._check_and_save_value(instance, self._commands[option], device)

		self._instance_update_dynamic(instance, device)

	def _instance_unapply_dynamic(self, instance, device):
		raise NotImplementedError()

	def _instance_update_dynamic(self, instance, device):
		raise NotImplementedError()

	#
	# Registration of commands for static plugins.
	#

	def _init_commands(self):
		"""
		Initialize commands.
		"""
		self._commands = collections.OrderedDict()
		self._autoregister_commands()
		self._check_commands()

	def _autoregister_commands(self):
		"""
		Register all commands marked using @command_set, @command_get, and @command_custom decorators.
		"""
		for member_name in self.__class__.__dict__:
			if member_name.startswith("__"):
				continue
			member = getattr(self, member_name)
			if not hasattr(member, "_command"):
				continue

			command_name = member._command["name"]
			info = self._commands.get(command_name, {"name": command_name})

			if "set" in member._command:
				info["custom"] = None
				info["set"] = member
				info["per_device"] = member._command["per_device"]
				info["priority"] = member._command["priority"]
			elif "get" in member._command:
				info["get"] = member
			elif "custom" in member._command:
				
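
	# Sketch of how a subclass registers commands via the decorators named
	# above (from tuned.plugins.decorators); the setter/getter signatures
	# below are illustrative, not normative:
	#
	#	@command_set("governor", per_device=True)
	#	def _set_governor(self, value, device, sim):
	#		...
	#
	#	@command_get("governor")
	#	def _get_governor(self, device, ignore_missing=False):
	#		...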
