3.2.5.2 Process Control Block in Linux





Example: Process control block in Linux

Note

Further reading

Achilles (2006) presents the Linux process control block in chapter 3.1. Reading this source is expressly recommended.

Students are often entitled to obtain a PDF version of this book free of charge from SpringerLink via their universities.


The following video shows how to find the declaration of the Linux process control block (task_struct) in the Linux kernel sources, and how it is related to the process table.

[Video: finding the declaration of task_struct in the Linux kernel sources and its relation to the process table]
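As a complement to the video, the following is a minimal kernel-module sketch, not canonical kernel code; the module name taskwalk and the log strings are assumptions. On a 3.13-era kernel, every task_struct is linked into the process table, a circular doubly linked list anchored at init_task and threaded through the tasks member of task_struct, and the for_each_process() macro from sched.h iterates over exactly this list.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/*
 * Walk the process table: for_each_process() follows the circular,
 * doubly linked list that is threaded through the "tasks" member of
 * every task_struct and anchored at init_task.
 */
static int __init taskwalk_init(void)
{
    struct task_struct *task;

    rcu_read_lock();            /* the task list is RCU-protected */
    for_each_process(task) {
        printk(KERN_INFO "taskwalk: pid=%d state=%ld comm=%s\n",
               task->pid, task->state, task->comm);
    }
    rcu_read_unlock();
    return 0;
}

static void __exit taskwalk_exit(void)
{
}

module_init(taskwalk_init);
module_exit(taskwalk_exit);
MODULE_LICENSE("GPL");

Loaded with insmod, such a module would write one line per entry of the process table to the kernel log (visible via dmesg): the PID, the scheduling state, and the command name of each task.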


task_struct: declaration of the Linux process control block

The following excerpt from the source file sched.h of the Linux kernel (version 3.13.0) shows the declaration of the data structure task_struct. This is the process control block as used by Linux.


struct task_struct {
    volatile long state;        /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;
    atomic_t usage;
    unsigned int flags;         /* per process flags, defined below */
    unsigned int ptrace;

#ifdef CONFIG_SMP
    struct llist_node wake_entry;
    int on_cpu;
    struct task_struct *last_wakee;
    unsigned long wakee_flips;
    unsigned long wakee_flip_decay_ts;

    int wake_cpu;
#endif
    int on_rq;

    int prio, static_prio, normal_prio;
    unsigned int rt_priority;
    const struct sched_class *sched_class;
    struct sched_entity se;
    struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
    struct task_group *sched_task_group;
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
    /* list of struct preempt_notifier: */
    struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
    unsigned int btrace_seq;
#endif

    unsigned int policy;
    int nr_cpus_allowed;
    cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
    int rcu_read_lock_nesting;
    char rcu_read_unlock_special;
    struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
    struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
    struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
    struct sched_info sched_info;
#endif

    struct list_head tasks;
#ifdef CONFIG_SMP
    struct plist_node pushable_tasks;
#endif

    struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
    unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
    struct task_rss_stat rss_stat;
#endif
/* task state */
    int exit_state;
    int exit_code, exit_signal;
    int pdeath_signal;          /* The signal sent when the parent dies */
    unsigned int jobctl;        /* JOBCTL_*, siglock protected */

    /* Used for emulating ABI behavior of previous Linux versions */
    unsigned int personality;

    unsigned did_exec:1;
    unsigned in_execve:1;       /* Tell the LSMs that the process is doing an
                                 * execve */
    unsigned in_iowait:1;

    /* task may not gain privileges */
    unsigned no_new_privs:1;

    /* Revert to default priority/policy when forking */
    unsigned sched_reset_on_fork:1;
    unsigned sched_contributes_to_load:1;

    pid_t pid;
    pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
    /* Canary value for the -fstack-protector gcc feature */
    unsigned long stack_canary;
#endif
    /*
     * pointers to (original) parent process, youngest child, younger sibling,
     * older sibling, respectively.  (p->father can be replaced with
     * p->real_parent->pid)
     */
    struct task_struct __rcu *real_parent; /* real parent process */
    struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
    /*
     * children/sibling forms the list of my natural children
     */
    struct list_head children;  /* list of my children */
    struct list_head sibling;   /* linkage in my parent's children list */
    struct task_struct *group_leader; /* threadgroup leader */

    /*
     * ptraced is the list of tasks this task is using ptrace on.
     * This includes both natural children and PTRACE_ATTACH targets.
     * p->ptrace_entry is p's link on the p->parent->ptraced list.
     */
    struct list_head ptraced;
    struct list_head ptrace_entry;

    /* PID/PID hash table linkage. */
    struct pid_link pids[PIDTYPE_MAX];
    struct list_head thread_group;
    struct list_head thread_node;

    struct completion *vfork_done; /* for vfork() */
    int __user *set_child_tid;     /* CLONE_CHILD_SETTID */
    int __user *clear_child_tid;   /* CLONE_CHILD_CLEARTID */

    cputime_t utime, stime, utimescaled, stimescaled;
    cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
    struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
    seqlock_t vtime_seqlock;
    unsigned long long vtime_snap;
    enum {
        VTIME_SLEEPING = 0,
        VTIME_USER,
        VTIME_SYS,
    } vtime_snap_whence;
#endif
    unsigned long nvcsw, nivcsw;     /* context switch counts */
    struct timespec start_time;      /* monotonic time */
    struct timespec real_start_time; /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
    unsigned long min_flt, maj_flt;

    struct task_cputime cputime_expires;
    struct list_head cpu_timers[3];

/* process credentials */
    const struct cred __rcu *real_cred; /* objective and real subjective task
                                         * credentials (COW) */
    const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                     * credentials (COW) */
    char comm[TASK_COMM_LEN]; /* executable name excluding path
                                 - access with [gs]et_task_comm (which lock
                                   it with task_lock())
                                 - initialized normally by setup_new_exec */
/* file system info */
    int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
    struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
    unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
    struct thread_struct thread;
/* filesystem information */
    struct fs_struct *fs;
/* open file information */
    struct files_struct *files;
/* namespaces */
    struct nsproxy *nsproxy;
/* signal handlers */
    struct signal_struct *signal;
    struct sighand_struct *sighand;

    sigset_t blocked, real_blocked;
    sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
    struct sigpending pending;

    unsigned long sas_ss_sp;
    size_t sas_ss_size;
    int (*notifier)(void *priv);
    void *notifier_data;
    sigset_t *notifier_mask;
    struct callback_head *task_works;

    struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
    kuid_t loginuid;
    unsigned int sessionid;
#endif
    struct seccomp seccomp;

/* Thread group tracking */
    u32 parent_exec_id;
    u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
    spinlock_t alloc_lock;

    /* Protection of the PI data structures: */
    raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
    /* PI waiters blocked on a rt_mutex held by this task */
    struct plist_head pi_waiters;
    /* Deadlock detection and priority inheritance handling */
    struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    /* mutex deadlock detection */
    struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    unsigned int irq_events;
    unsigned long hardirq_enable_ip;
    unsigned long hardirq_disable_ip;
    unsigned int hardirq_enable_event;
    unsigned int hardirq_disable_event;
    int hardirqs_enabled;
    int hardirq_context;
    unsigned long softirq_disable_ip;
    unsigned long softirq_enable_ip;
    unsigned int softirq_disable_event;
    unsigned int softirq_enable_event;
    int softirqs_enabled;
    int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
    u64 curr_chain_key;
    int lockdep_depth;
    unsigned int lockdep_recursion;
    struct held_lock held_locks[MAX_LOCK_DEPTH];
    gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
    void *journal_info;

/* stacked block device info */
    struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
    struct blk_plug *plug;
#endif

/* VM state */
    struct reclaim_state *reclaim_state;

    struct backing_dev_info *backing_dev_info;

    struct io_context *io_context;

    unsigned long ptrace_message;
    siginfo_t *last_siginfo; /* For ptrace use. */
    struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
    u64 acct_rss_mem1;      /* accumulated rss usage */
    u64 acct_vm_mem1;       /* accumulated virtual memory usage */
    cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
    nodemask_t mems_allowed;     /* Protected by alloc_lock */
    seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
    int cpuset_mem_spread_rotor;
    int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
    /* Control Group info protected by css_set_lock */
    struct css_set __rcu *cgroups;
    /* cg_list protected by css_set_lock and tsk->alloc_lock */
    struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
    struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
    struct compat_robust_list_head __user *compat_robust_list;
#endif
    struct list_head pi_state_list;
    struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
    struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
    struct mutex perf_event_mutex;
    struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
    struct mempolicy *mempolicy; /* Protected by alloc_lock */
    short il_next;
    short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
    int numa_scan_seq;
    unsigned int numa_scan_period;
    unsigned int numa_scan_period_max;
    int numa_preferred_nid;
    int numa_migrate_deferred;
    unsigned long numa_migrate_retry;
    u64 node_stamp;             /* migration stamp */
    struct callback_head numa_work;

    struct list_head numa_entry;
    struct numa_group *numa_group;

    /*
     * Exponential decaying average of faults on a per-node basis.
     * Scheduling placement decisions are made based on the these counts.
     * The values remain static for the duration of a PTE scan
     */
    unsigned long *numa_faults;
    unsigned long total_numa_faults;

    /*
     * numa_faults_buffer records faults per node during the current
     * scan window. When the scan completes, the counts in numa_faults
     * decay and these values are copied.
     */
    unsigned long *numa_faults_buffer;

    /*
     * numa_faults_locality tracks if faults recorded during the last
     * scan window were remote/local. The task scan period is adapted
     * based on the locality of the faults with different weights
     * depending on whether they were shared or private faults
     */
    unsigned long numa_faults_locality[2];

    unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

    struct rcu_head rcu;

    /*
     * cache last used pipe for splice
     */
    struct pipe_inode_info *splice_pipe;

    struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
    struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
    int make_it_fail;
#endif
    /*
     * when (nr_dirtied >= nr_dirtied_pause), it's time to call
     * balance_dirty_pages() for some dirty throttling pause
     */
    int nr_dirtied;
    int nr_dirtied_pause;
    unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
    int latency_record_count;
    struct latency_record latency_record[LT_SAVECOUNT];
#endif
    /*
     * time slack values; these are used to round up poll() and
     * select() etc timeout values. These are in nanoseconds.
     */
    unsigned long timer_slack_ns;
    unsigned long default_timer_slack_ns;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    /* Index of current stored address in ret_stack */
    int curr_ret_stack;
    /* Stack of return addresses for return function tracing */
    struct ftrace_ret_stack *ret_stack;
    /* time stamp for last schedule */
    unsigned long long ftrace_timestamp;
    /*
     * Number of functions that haven't been traced
     * because of depth overrun.
     */
    atomic_t trace_overrun;
    /* Pause for the tracing */
    atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
    /* state flags for use by tracers */
    unsigned long trace;
    /* bitmask and counter of trace recursion */
    unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
    struct memcg_batch_info {
        int do_batch;             /* incremented when batch uncharge started */
        struct mem_cgroup *memcg; /* target memcg of uncharge */
        unsigned long nr_pages;   /* uncharged usage */
        unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
    } memcg_batch;
    unsigned int memcg_kmem_skip_account;
    struct memcg_oom_info {
        struct mem_cgroup *memcg;
        gfp_t gfp_mask;
        int order;
        unsigned int may_oom:1;
    } memcg_oom;
#endif
#ifdef CONFIG_UPROBES
    struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
    unsigned int sequential_io;
    unsigned int sequential_io_avg;
#endif
};
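To make the listing concrete, here is a second minimal sketch, again an illustration rather than canonical kernel code; the module name pcbdump and the log strings are assumptions. Inside the kernel, the current macro (pulled in via sched.h) evaluates to a pointer to the task_struct of the currently executing task, so individual fields of the process control block such as pid, tgid, prio, and comm can be read directly.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Dump a few fields of the calling task's own process control block. */
static int __init pcbdump_init(void)
{
    /* "current" points to the task_struct of the running task; during
     * module initialization that is the insmod process itself. */
    printk(KERN_INFO "pcbdump: pid=%d tgid=%d prio=%d comm=%s\n",
           current->pid, current->tgid, current->prio, current->comm);
    return 0;
}

static void __exit pcbdump_exit(void)
{
}

module_init(pcbdump_init);
module_exit(pcbdump_exit);
MODULE_LICENSE("GPL");

Note that comm holds only the executable name without its path, truncated to TASK_COMM_LEN characters, which is why tools such as ps and top display shortened command names.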



This page is available under the Creative Commons Attribution 3.0 Unported license.