Das folgende Video zeigt, wie man in den Quelltexten des Linux-Kernels die Deklaration des Linux-Prozesskontrollblocks (task_struct) findet und wie diese mit der Prozesstabelle zusammenhängt.
Wenn Sie dieses Element öffnen, werden Inhalte von externen Dienstleistern geladen und dadurch Ihre IP-Adresse an diese übertragen.
Der folgende Auszug aus der Quelltext-Datei sched.h des Linux-Kernels (Version 3.13.0) zeigt die Deklaration der Datenstruktur task_struct. Dabei handelt es sich um den Prozesskontrollblock, wie er von Linux verwendet wird.
1 struct task_struct {
2 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
3 void *stack;
4 atomic_t usage;
5 unsigned int flags; /* per process flags, defined below */
6 unsigned int ptrace;
7
8 #ifdef CONFIG_SMP
9 struct llist_node wake_entry;
10 int on_cpu;
11 struct task_struct *last_wakee;
12 unsigned long wakee_flips;
13 unsigned long wakee_flip_decay_ts;
14
15 int wake_cpu;
16 #endif
17 int on_rq;
18
19 int prio, static_prio, normal_prio;
20 unsigned int rt_priority;
21 const struct sched_class *sched_class;
22 struct sched_entity se;
23 struct sched_rt_entity rt;
24 #ifdef CONFIG_CGROUP_SCHED
25 struct task_group *sched_task_group;
26 #endif
27
28 #ifdef CONFIG_PREEMPT_NOTIFIERS
29 /* list of struct preempt_notifier: */
30 struct hlist_head preempt_notifiers;
31 #endif
32
33 #ifdef CONFIG_BLK_DEV_IO_TRACE
34 unsigned int btrace_seq;
35 #endif
36
37 unsigned int policy;
38 int nr_cpus_allowed;
39 cpumask_t cpus_allowed;
40
41 #ifdef CONFIG_PREEMPT_RCU
42 int rcu_read_lock_nesting;
43 char rcu_read_unlock_special;
44 struct list_head rcu_node_entry;
45 #endif /* #ifdef CONFIG_PREEMPT_RCU */
46 #ifdef CONFIG_TREE_PREEMPT_RCU
47 struct rcu_node *rcu_blocked_node;
48 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
49 #ifdef CONFIG_RCU_BOOST
50 struct rt_mutex *rcu_boost_mutex;
51 #endif /* #ifdef CONFIG_RCU_BOOST */
52
53 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
54 struct sched_info sched_info;
55 #endif
56
57 struct list_head tasks;
58 #ifdef CONFIG_SMP
59 struct plist_node pushable_tasks;
60 #endif
61
62 struct mm_struct *mm, *active_mm;
63 #ifdef CONFIG_COMPAT_BRK
64 unsigned brk_randomized:1;
65 #endif
66 #if defined(SPLIT_RSS_COUNTING)
67 struct task_rss_stat rss_stat;
68 #endif
69 /* task state */
70 int exit_state;
71 int exit_code, exit_signal;
72 int pdeath_signal; /* The signal sent when the parent dies */
73 unsigned int jobctl; /* JOBCTL_*, siglock protected */
74
75 /* Used for emulating ABI behavior of previous Linux versions */
76 unsigned int personality;
77
78 unsigned did_exec:1;
79 unsigned in_execve:1; /* Tell the LSMs that the process is doing an
80 * execve */
81 unsigned in_iowait:1;
82
83 /* task may not gain privileges */
84 unsigned no_new_privs:1;
85
86 /* Revert to default priority/policy when forking */
87 unsigned sched_reset_on_fork:1;
88 unsigned sched_contributes_to_load:1;
89
90 pid_t pid;
91 pid_t tgid;
92
93 #ifdef CONFIG_CC_STACKPROTECTOR
94 /* Canary value for the -fstack-protector gcc feature */
95 unsigned long stack_canary;
96 #endif
97 /*
98 * pointers to (original) parent process, youngest child, younger sibling,
99 * older sibling, respectively. (p->father can be replaced with
100 * p->real_parent->pid)
101 */
102 struct task_struct __rcu *real_parent; /* real parent process */
103 struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
104 /*
105 * children/sibling forms the list of my natural children
106 */
107 struct list_head children; /* list of my children */
108 struct list_head sibling; /* linkage in my parent's children list */
109 struct task_struct *group_leader; /* threadgroup leader */
110
111 /*
112 * ptraced is the list of tasks this task is using ptrace on.
113 * This includes both natural children and PTRACE_ATTACH targets.
114 * p->ptrace_entry is p's link on the p->parent->ptraced list.
115 */
116 struct list_head ptraced;
117 struct list_head ptrace_entry;
118
119 /* PID/PID hash table linkage. */
120 struct pid_link pids[PIDTYPE_MAX];
121 struct list_head thread_group;
122 struct list_head thread_node;
123
124 struct completion *vfork_done; /* for vfork() */
125 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
126 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
127
128 cputime_t utime, stime, utimescaled, stimescaled;
129 cputime_t gtime;
130 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
131 struct cputime prev_cputime;
132 #endif
133 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
134 seqlock_t vtime_seqlock;
135 unsigned long long vtime_snap;
136 enum {
137 VTIME_SLEEPING = 0,
138 VTIME_USER,
139 VTIME_SYS,
140 } vtime_snap_whence;
141 #endif
142 unsigned long nvcsw, nivcsw; /* context switch counts */
143 struct timespec start_time; /* monotonic time */
144 struct timespec real_start_time; /* boot based time */
145 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
146 unsigned long min_flt, maj_flt;
147
148 struct task_cputime cputime_expires;
149 struct list_head cpu_timers[3];
150
151 /* process credentials */
152 const struct cred __rcu *real_cred; /* objective and real subjective task
153 * credentials (COW) */
154 const struct cred __rcu *cred; /* effective (overridable) subjective task
155 * credentials (COW) */
156 char comm[TASK_COMM_LEN]; /* executable name excluding path
157 - access with [gs]et_task_comm (which lock
158 it with task_lock())
159 - initialized normally by setup_new_exec */
160 /* file system info */
161 int link_count, total_link_count;
162 #ifdef CONFIG_SYSVIPC
163 /* ipc stuff */
164 struct sysv_sem sysvsem;
165 #endif
166 #ifdef CONFIG_DETECT_HUNG_TASK
167 /* hung task detection */
168 unsigned long last_switch_count;
169 #endif
170 /* CPU-specific state of this task */
171 struct thread_struct thread;
172 /* filesystem information */
173 struct fs_struct *fs;
174 /* open file information */
175 struct files_struct *files;
176 /* namespaces */
177 struct nsproxy *nsproxy;
178 /* signal handlers */
179 struct signal_struct *signal;
180 struct sighand_struct *sighand;
181
182 sigset_t blocked, real_blocked;
183 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
184 struct sigpending pending;
185
186 unsigned long sas_ss_sp;
187 size_t sas_ss_size;
188 int (*notifier)(void *priv);
189 void *notifier_data;
190 sigset_t *notifier_mask;
191 struct callback_head *task_works;
192
193 struct audit_context *audit_context;
194 #ifdef CONFIG_AUDITSYSCALL
195 kuid_t loginuid;
196 unsigned int sessionid;
197 #endif
198 struct seccomp seccomp;
199
200 /* Thread group tracking */
201 u32 parent_exec_id;
202 u32 self_exec_id;
203 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
204 * mempolicy */
205 spinlock_t alloc_lock;
206
207 /* Protection of the PI data structures: */
208 raw_spinlock_t pi_lock;
209
210 #ifdef CONFIG_RT_MUTEXES
211 /* PI waiters blocked on a rt_mutex held by this task */
212 struct plist_head pi_waiters;
213 /* Deadlock detection and priority inheritance handling */
214 struct rt_mutex_waiter *pi_blocked_on;
215 #endif
216
217 #ifdef CONFIG_DEBUG_MUTEXES
218 /* mutex deadlock detection */
219 struct mutex_waiter *blocked_on;
220 #endif
221 #ifdef CONFIG_TRACE_IRQFLAGS
222 unsigned int irq_events;
223 unsigned long hardirq_enable_ip;
224 unsigned long hardirq_disable_ip;
225 unsigned int hardirq_enable_event;
226 unsigned int hardirq_disable_event;
227 int hardirqs_enabled;
228 int hardirq_context;
229 unsigned long softirq_disable_ip;
230 unsigned long softirq_enable_ip;
231 unsigned int softirq_disable_event;
232 unsigned int softirq_enable_event;
233 int softirqs_enabled;
234 int softirq_context;
235 #endif
236 #ifdef CONFIG_LOCKDEP
237 # define MAX_LOCK_DEPTH 48UL
238 u64 curr_chain_key;
239 int lockdep_depth;
240 unsigned int lockdep_recursion;
241 struct held_lock held_locks[MAX_LOCK_DEPTH];
242 gfp_t lockdep_reclaim_gfp;
243 #endif
244
245 /* journalling filesystem info */
246 void *journal_info;
247
248 /* stacked block device info */
249 struct bio_list *bio_list;
250
251 #ifdef CONFIG_BLOCK
252 /* stack plugging */
253 struct blk_plug *plug;
254 #endif
255
256 /* VM state */
257 struct reclaim_state *reclaim_state;
258
259 struct backing_dev_info *backing_dev_info;
260
261 struct io_context *io_context;
262
263 unsigned long ptrace_message;
264 siginfo_t *last_siginfo; /* For ptrace use. */
265 struct task_io_accounting ioac;
266 #if defined(CONFIG_TASK_XACCT)
267 u64 acct_rss_mem1; /* accumulated rss usage */
268 u64 acct_vm_mem1; /* accumulated virtual memory usage */
269 cputime_t acct_timexpd; /* stime + utime since last update */
270 #endif
271 #ifdef CONFIG_CPUSETS
272 nodemask_t mems_allowed; /* Protected by alloc_lock */
273 seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
274 int cpuset_mem_spread_rotor;
275 int cpuset_slab_spread_rotor;
276 #endif
277 #ifdef CONFIG_CGROUPS
278 /* Control Group info protected by css_set_lock */
279 struct css_set __rcu *cgroups;
280 /* cg_list protected by css_set_lock and tsk->alloc_lock */
281 struct list_head cg_list;
282 #endif
283 #ifdef CONFIG_FUTEX
284 struct robust_list_head __user *robust_list;
285 #ifdef CONFIG_COMPAT
286 struct compat_robust_list_head __user *compat_robust_list;
287 #endif
288 struct list_head pi_state_list;
289 struct futex_pi_state *pi_state_cache;
290 #endif
291 #ifdef CONFIG_PERF_EVENTS
292 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
293 struct mutex perf_event_mutex;
294 struct list_head perf_event_list;
295 #endif
296 #ifdef CONFIG_NUMA
297 struct mempolicy *mempolicy; /* Protected by alloc_lock */
298 short il_next;
299 short pref_node_fork;
300 #endif
301 #ifdef CONFIG_NUMA_BALANCING
302 int numa_scan_seq;
303 unsigned int numa_scan_period;
304 unsigned int numa_scan_period_max;
305 int numa_preferred_nid;
306 int numa_migrate_deferred;
307 unsigned long numa_migrate_retry;
308 u64 node_stamp; /* migration stamp */
309 struct callback_head numa_work;
310
311 struct list_head numa_entry;
312 struct numa_group *numa_group;
313
314 /*
315 * Exponential decaying average of faults on a per-node basis.
316 * Scheduling placement decisions are made based on the these counts.
317 * The values remain static for the duration of a PTE scan
318 */
319 unsigned long *numa_faults;
320 unsigned long total_numa_faults;
321
322 /*
323 * numa_faults_buffer records faults per node during the current
324 * scan window. When the scan completes, the counts in numa_faults
325 * decay and these values are copied.
326 */
327 unsigned long *numa_faults_buffer;
328
329 /*
330 * numa_faults_locality tracks if faults recorded during the last
331 * scan window were remote/local. The task scan period is adapted
332 * based on the locality of the faults with different weights
333 * depending on whether they were shared or private faults
334 */
335 unsigned long numa_faults_locality[2];
336
337 unsigned long numa_pages_migrated;
338 #endif /* CONFIG_NUMA_BALANCING */
339
340 struct rcu_head rcu;
341
342 /*
343 * cache last used pipe for splice
344 */
345 struct pipe_inode_info *splice_pipe;
346
347 struct page_frag task_frag;
348
349 #ifdef CONFIG_TASK_DELAY_ACCT
350 struct task_delay_info *delays;
351 #endif
352 #ifdef CONFIG_FAULT_INJECTION
353 int make_it_fail;
354 #endif
355 /*
356 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
357 * balance_dirty_pages() for some dirty throttling pause
358 */
359 int nr_dirtied;
360 int nr_dirtied_pause;
361 unsigned long dirty_paused_when; /* start of a write-and-pause period */
362
363 #ifdef CONFIG_LATENCYTOP
364 int latency_record_count;
365 struct latency_record latency_record[LT_SAVECOUNT];
366 #endif
367 /*
368 * time slack values; these are used to round up poll() and
369 * select() etc timeout values. These are in nanoseconds.
370 */
371 unsigned long timer_slack_ns;
372 unsigned long default_timer_slack_ns;
373
374 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
375 /* Index of current stored address in ret_stack */
376 int curr_ret_stack;
377 /* Stack of return addresses for return function tracing */
378 struct ftrace_ret_stack *ret_stack;
379 /* time stamp for last schedule */
380 unsigned long long ftrace_timestamp;
381 /*
382 * Number of functions that haven't been traced
383 * because of depth overrun.
384 */
385 atomic_t trace_overrun;
386 /* Pause for the tracing */
387 atomic_t tracing_graph_pause;
388 #endif
389 #ifdef CONFIG_TRACING
390 /* state flags for use by tracers */
391 unsigned long trace;
392 /* bitmask and counter of trace recursion */
393 unsigned long trace_recursion;
394 #endif /* CONFIG_TRACING */
395 #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
396 struct memcg_batch_info {
397 int do_batch; /* incremented when batch uncharge started */
398 struct mem_cgroup *memcg; /* target memcg of uncharge */
399 unsigned long nr_pages; /* uncharged usage */
400 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
401 } memcg_batch;
402 unsigned int memcg_kmem_skip_account;
403 struct memcg_oom_info {
404 struct mem_cgroup *memcg;
405 gfp_t gfp_mask;
406 int order;
407 unsigned int may_oom:1;
408 } memcg_oom;
409 #endif
410 #ifdef CONFIG_UPROBES
411 struct uprobe_task *utask;
412 #endif
413 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
414 unsigned int sequential_io;
415 unsigned int sequential_io_avg;
416 #endif
417 };
Das folgende Bild wurde im Video erläutert:
Im Video wurde der Zusammenhang zwischen der Datenstruktur task_struct und den Spalten der Prozesstabelle erläutert.
Was schätzen Sie:
Aus wie vielen Spalten besteht die Prozesstabelle in etwa?
Weiterführende Literatur
Achilles 2006 zeigt in Kapitel 3.1 den Linux Process Control Block. Die Lektüre dieser Quelle sei ausdrücklich empfohlen.
Studierende sind oftmals berechtigt, eine PDF-Version dieses Buches über ihre Hochschulen kostenlos von SpringerLink zu beziehen.
Diese Seite steht unter der Creative-Commons-Lizenz Namensnennung 3.0 Unported (https://creativecommons.org/licenses/by/3.0/).