对于一个软件工程师
来说,我并不是很关心 BootLoader
这些东西,几乎都比较接近于硬件,Linux
的代码又普遍太过于工业级,因此尝试用 rt-thread
这样一个比较小型的 real time
系统来解析操作系统的工作。
For My Lovely Wife
环境准备 1 2 3 4 git clone https://github.com/RT-Thread/rt-thread.git cd rt-thread/bsp/qemu-vexpress-a9 scons ./run-qemu.sh
Clock 时钟
建议初学者先阅读 内核基础
对于操作系统来说,我们需要依赖时间,无论是显示的时间,还是说我们的进程运行了多久,这些都非常依赖于一个硬件 晶振
,晶振的作用很简单,但这只是比较初级的版本,后来在 CPU 中集成了 PIT
,Whatever 我们暂时不管具体怎么实现,对于 OS
来说,当时间发生了变化,CPU就会触发 interrupt
中断 对于不同的架构的 CPU
,中断的设计不太一样,这里我们就拿手机上的 Cortex-A53
作为例子。在 interrupt.h
暴露了如何注册中断函数的方法 rt_hw_interrupt_install
相对应的 BSP - qemu-vexpress-a9
中,我们可以找到注册的调用处
rt_hw_timer_init link 1 2 3 4 5 6 7 8 int rt_hw_timer_init (void ) { rt_hw_interrupt_install(IRQ_PBA8_TIMER2_3, rt_hw_timer_isr, RT_NULL, "tick" ); rt_hw_interrupt_umask(IRQ_PBA8_TIMER2_3); return 0 ; }
接下来,我们就看到我们软件部分的逻辑了
clock.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 void rt_tick_increase (void ) { struct rt_thread *thread ; #ifdef RT_USING_SMP rt_cpu_self()->tick ++; #else ++ rt_tick; #endif thread = rt_thread_self(); -- thread->remaining_tick; if (thread->remaining_tick == 0 ) { thread->remaining_tick = thread->init_tick; thread->stat |= RT_THREAD_STAT_YIELD; rt_schedule(); } rt_timer_check(); }
时钟函数是系统中最为简单的部分,仅仅用作计数,重新调度也仅仅是顺带而为之,那我们接下来看看进程调度。
进程调度 RT-Thread
线程管理的主要功能是对线程进行管理和调度,系统中总共存在两类线程,分别是系统线程和用户线程。对于进程来说,我们需要保留运行的状态进行调度。
rtdef.h link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 struct rt_thread { char name[RT_NAME_MAX]; rt_uint8_t type; rt_uint8_t flags; rt_list_t list ; rt_list_t tlist; void *sp; void *entry; void *parameter; void *stack_addr; rt_uint32_t stack_size; rt_err_t error; rt_uint8_t stat; rt_uint8_t current_priority; rt_uint8_t init_priority; rt_uint32_t number_mask; ...... rt_ubase_t init_tick; rt_ubase_t remaining_tick; struct rt_timer thread_timer ; void (*cleanup)(struct rt_thread *tid); rt_uint32_t user_data; };
对 RT-Thread
也包含了一组进程的状态切换。
Try it 我们修改 /rt-thread/bsp/qemu-vexpress-a9/applications/main.c
main.c 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 #include <rtthread.h> #define THREAD_STACK_SIZE 1024 #define THREAD_PRIORITY 20 #define THREAD_TIMESLICE 10 static void thread_entry (void * parameter) { rt_uint32_t value; rt_uint32_t count = 0 ; value = (rt_uint32_t )parameter; while (1 ) { if (0 == (count % 5 )) { rt_kprintf("thread %d is running ,thread %d count = %d\n" , value , value , count); if (count> 200 ) return ; } count++; } } int timeslice_sample (void ) { rt_thread_t tid = RT_NULL; tid = rt_thread_create("thread1" , thread_entry, (void *)1 , THREAD_STACK_SIZE, THREAD_PRIORITY, THREAD_TIMESLICE); if (tid != RT_NULL) rt_thread_startup(tid); tid = rt_thread_create("thread2" , thread_entry, (void *)2 , THREAD_STACK_SIZE, THREAD_PRIORITY, THREAD_TIMESLICE-5 ); if (tid != RT_NULL) rt_thread_startup(tid); return 0 ; } int main (void ) { timeslice_sample(); return 0 ; }
execute 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 $ scons $ ./qemu-nographic.sh msh />thread 1 is running ,thread 1 count = 0 thread 1 is running ,thread 1 count = 5 thread 1 is running ,thread 1 count = 10 thread 1 is running ,thread 1 count = 15 thread 1 is running ,thread 1 count = 20 thread 1 is running ,thread 1 count = 25 thread 1 is running ,thread 1 count = 30 thread 1 is running ,thread 1 count = 35 thread 1 is running ,thread 1 count = 40 thread 1 is running ,thread 1 count = 45 thread 1 is running ,thread 1 count = 50 thread 1 is running ,thread 1 count = 55 thread 1 is running ,thread 1 count = 60 thread 1 is running ,thread 1 count = 65 thread 1 is running ,thread 1 count = 70 thread 1 is running ,thread 1 count = 75 thread 1 is running ,thread 1 count = 80 thread 1 is running ,thread 1 count = 85 thread 1 is running ,thread 1 count = 90 thread 1 is running ,thread 1 count = 95 thread 1 is running ,thread 1 count = 100 thread 1 is running ,thread 1 count = 105 thread 1 is running ,thread 1 count = 110 thread 1 is running ,thread 1 count = 115 thread 1 is running ,thread 1 count = 1thread 2 is running ,thread 2 count = 0
由运行的计数结果可以看出,线程 2 的运行时间是线程 1 的一半。
线程的创建 对于线程的创建
thread.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 static rt_err_t _rt_thread_init(struct rt_thread *thread, const char *name, void (*entry)(void *parameter), void *parameter, void *stack_start, rt_uint32_t stack_size, rt_uint8_t priority, rt_uint32_t tick) { rt_list_init(&(thread->tlist)); thread->entry = (void *)entry; thread->parameter = parameter; thread->stack_addr = stack_start; thread->stack_size = stack_size; rt_memset(thread->stack_addr, '#' , thread->stack_size); thread->sp = (void *)rt_hw_stack_init(thread->entry, thread->parameter, (rt_uint8_t *)((char *)thread->stack_addr + thread->stack_size - sizeof (rt_ubase_t )), (void *)rt_thread_exit); RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX); thread->init_priority = priority; thread->current_priority = priority; thread->number_mask = 0 ; thread->init_tick = tick; thread->remaining_tick = tick; thread->error = RT_EOK; thread->stat = RT_THREAD_INIT; thread->cleanup = 0 ; thread->user_data = 0 ; rt_timer_init(&(thread->thread_timer), thread->name, rt_thread_timeout, thread, 0 , RT_TIMER_FLAG_ONE_SHOT); RT_OBJECT_HOOK_CALL(rt_thread_inited_hook, (thread)); return RT_EOK; }
其实也不用读得多么详细,大致上知道有这么多的字段需要维护。
线程的运行 使用 rt_err_t rt_thread_startup(rt_thread_t thread);
进行启动线程,而且启动的逻辑并不复杂。
thread.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 rt_err_t rt_thread_startup (rt_thread_t thread) { thread->current_priority = thread->init_priority; RT_DEBUG_LOG(RT_DEBUG_THREAD, ("startup a thread:%s with priority:%d\n" , thread->name, thread->init_priority)); thread->stat = RT_THREAD_SUSPEND; rt_thread_resume(thread); if (rt_thread_self() != RT_NULL) { rt_schedule(); } return RT_EOK; }
线程的调度 scheduler.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 void rt_schedule (void ) { rt_base_t level; struct rt_thread *to_thread ; struct rt_thread *current_thread ; struct rt_cpu *pcpu ; int cpu_id; level = rt_hw_interrupt_disable(); cpu_id = rt_hw_cpu_id(); pcpu = rt_cpu_index(cpu_id); current_thread = pcpu->current_thread; if (current_thread->scheduler_lock_nest == 1 ) { rt_ubase_t highest_ready_priority; if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0 ) { to_thread = _get_highest_priority_thread(&highest_ready_priority); current_thread->oncpu = RT_CPU_DETACHED; if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING) { if (current_thread->current_priority < highest_ready_priority) { to_thread = current_thread; } else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0 ) { to_thread = current_thread; } else { rt_schedule_insert_thread(current_thread); } current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK; } to_thread->oncpu = cpu_id; if (to_thread != current_thread) { pcpu->current_priority = (rt_uint8_t )highest_ready_priority; RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread)); rt_schedule_remove_thread(to_thread); to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK); rt_hw_context_switch((rt_ubase_t )¤t_thread->sp, (rt_ubase_t )&to_thread->sp, to_thread); } } } rt_hw_interrupt_enable(level); return ; }
从代码分析,其实我们可以发现,RT Thread
如果包含高优先级的进程是不会给低优先级运行的机会的。我们修改代码
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 int timeslice_sample(void) { rt_thread_t tid = RT_NULL; tid = rt_thread_create("thread1", thread_entry, (void*)1, THREAD_STACK_SIZE, THREAD_PRIORITY - 1, THREAD_TIMESLICE); if (tid != RT_NULL) rt_thread_startup(tid); tid = rt_thread_create("thread2", thread_entry, (void*)2, THREAD_STACK_SIZE, THREAD_PRIORITY, THREAD_TIMESLICE-5); if (tid != RT_NULL) rt_thread_startup(tid); return 0; }
执行结果如下
execute 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 thread 1 is running ,thread 1 count = msh />140 thread 1 is running ,thread 1 count = 145 thread 1 is running ,thread 1 count = 150 thread 1 is running ,thread 1 count = 155 thread 1 is running ,thread 1 count = 160 thread 1 is running ,thread 1 count = 165 thread 1 is running ,thread 1 count = 170 thread 1 is running ,thread 1 count = 175 thread 1 is running ,thread 2 counthread 2t = 0 thread 1 is run is running ,thread 1 count = 18ning ,thread 2 count = 5 thread 1 is running thread 1 is running ,thread 1 count ,thread 1 count = = 190 thread 191 thread is r2 is running ,threunnad 2 count = 10 thread 1 is trunnhreadi 1 is running ,ng ,thread 1 count = 205thread 2 countthr = 20 ead 2 is running ,thread 2 count = 20 thread 2 is running ,thread 2 count = 25 thread 2 is running ,thread 2 count = 30 thread 2 is running ,thread 2 count = 35 thread 2 is running ,thread 2 count = 40 thread 2 is running ,thread 2 count = 45 thread 2 is running ,thread 2 count = 50
我们可以发现 thread 1
执行完成之后才会有 thread 2
进行执行。至于 rt_hw_context_switch
就是将线程状态恢复到 寄存器
中
线程调度触发 什么时候会触发 rt_schedule
进行调度呢?这个事情其实绝大多数的系统都分为两类
主动型:调用 rt_schedule() | rt_thread_yield(),都属于线程将自己的执行放弃了
被动型:当我们的线程不满足条件的时候,比如获得锁失败等等
线程间同步 对于OS来说,一般有 信号量(semaphore)、互斥量(mutex),RT 还提供了 Event
这个模式
信号量 信号量的定义非常的简单
rt_semaphore link 1 2 3 4 5 6 7 struct rt_semaphore { struct rt_ipc_object parent ; rt_uint16_t value; rt_uint16_t reserved; };
由 IPC
容器所管理,信号量的最大值是 65535
How it works 说起来信号量本来也只是一个普通变量,因此就是分配在内核中的一个变量,在 rt_sem_create 中
ipc.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 rt_sem_t rt_sem_create (const char *name, rt_uint32_t value, rt_uint8_t flag) { rt_sem_t sem; sem = (rt_sem_t )rt_object_allocate(RT_Object_Class_Semaphore, name); if (sem == RT_NULL) return sem; rt_ipc_object_init(&(sem->parent)); sem->value = value; sem->parent.parent.flag = flag; return sem; }
当我们创建了 信号量 之后,就可以通过 rt_sem_take()
和 rt_sem_trytake()
进行操作,我们一窥究竟
ipc.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 rt_err_t rt_sem_take (rt_sem_t sem, rt_int32_t time) { register rt_base_t temp; struct rt_thread *thread ; temp = rt_hw_interrupt_disable(); if (sem->value > 0 ) { sem->value --; rt_hw_interrupt_enable(temp); } else { if (time == 0 ) { rt_hw_interrupt_enable(temp); return -RT_ETIMEOUT; } else { thread = rt_thread_self(); thread->error = RT_EOK; rt_ipc_list_suspend(&(sem->parent.suspend_thread), thread, sem->parent.parent.flag); if (time > 0 ) { rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &time); rt_timer_start(&(thread->thread_timer)); } rt_hw_interrupt_enable(temp); rt_schedule(); if (thread->error != RT_EOK) { return thread->error; } } } return RT_EOK; }
对于定时器
的部分,我们在后续再讨论。
互斥量 互斥量相较于信号量来说,支持递归访问且能防止线程优先级翻转;并且互斥量只能由持有线程释放,而信号量则可以由任何线程释放。
rtdef.h link 1 2 3 4 5 6 7 8 struct rt_mutex { struct rt_ipc_object parent ; rt_uint16_t value; rt_uint8_t original_priority; rt_uint8_t hold; struct rt_thread *owner ; };
How it works 我们创建 mutex
如下
ipc.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 rt_mutex_t rt_mutex_create (const char *name, rt_uint8_t flag) { struct rt_mutex *mutex ; mutex = (rt_mutex_t )rt_object_allocate(RT_Object_Class_Mutex, name); if (mutex == RT_NULL) return mutex; rt_ipc_object_init(&(mutex->parent)); mutex->value = 1 ; mutex->owner = RT_NULL; mutex->original_priority = 0xFF ; mutex->hold = 0 ; mutex->parent.parent.flag = flag; return mutex; }
当我们去 rt_mutex_take
获得一个 mutex
时候
ipc.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 rt_err_t rt_mutex_take (rt_mutex_t mutex, rt_int32_t time) { register rt_base_t temp; struct rt_thread *thread ; thread = rt_thread_self(); temp = rt_hw_interrupt_disable(); thread->error = RT_EOK; if (mutex->owner == thread) { } else { if (mutex->value > 0 ) { mutex->value --; mutex->owner = thread; mutex->original_priority = thread->current_priority; if (mutex->hold < RT_MUTEX_HOLD_MAX) { mutex->hold ++; } else { rt_hw_interrupt_enable(temp); return -RT_EFULL; } } else { if (time == 0 ) { thread->error = -RT_ETIMEOUT; rt_hw_interrupt_enable(temp); return -RT_ETIMEOUT; } else { if (thread->current_priority < mutex->owner->current_priority) { rt_thread_control(mutex->owner, RT_THREAD_CTRL_CHANGE_PRIORITY, &thread->current_priority); } rt_ipc_list_suspend(&(mutex->parent.suspend_thread), thread, mutex->parent.parent.flag); rt_hw_interrupt_enable(temp); rt_schedule(); } } } rt_hw_interrupt_enable(temp); return RT_EOK; }
RT-Thread 操作系统中,互斥量可以解决优先级翻转问题,实现的是优先级继承算法。优先级继承是通过在线程 A 尝试获取共享资源而被挂起的期间内,将线程 C 的优先级提升到线程 A 的优先级别,从而解决优先级翻转引起的问题。这样能够防止 C(间接地防止 A)被 B 抢占,如下图所示。优先级继承是指,提高某个占有某种资源的低优先级线程的优先级,使之与所有等待该资源的线程中优先级最高的那个线程的优先级相等,然后执行,而当这个低优先级线程释放该资源时,优先级重新回到初始设定。因此,继承优先级的线程避免了系统资源被任何中间优先级的线程抢占。
不过值得注意的是,并不是将 C
直接放弃执行,给 A
执行,而是将 C
提升到 A
的优先级,从而保证在 C
释放锁之后,第一个执行的就是 A
,而不是 B
线程间通讯 我们完成了线程之间的锁,之后,我们可以进行线程间的通讯。RT
提供了一种 mailbox
机制
Mailbox rtdef.h link 1 2 3 4 5 6 7 8 9 10 11 12 struct rt_mailbox { struct rt_ipc_object parent ; rt_uint32_t * msg_pool; rt_uint16_t size; rt_uint16_t entry; rt_uint16_t in_offset, out_offset; rt_list_t suspend_sender_thread; }; typedef struct rt_mailbox * rt_mailbox_t ;
当我们需要发送消息的时候,就可以 rt_mb_send_wait()
与 rt_mb_send()
ipc.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 rt_err_t rt_mb_send_wait (rt_mailbox_t mb, rt_ubase_t value, rt_int32_t timeout) { struct rt_thread *thread ; register rt_ubase_t temp; rt_uint32_t tick_delta; tick_delta = 0 ; thread = rt_thread_self(); temp = rt_hw_interrupt_disable(); if (mb->entry == mb->size && timeout == 0 ) { rt_hw_interrupt_enable(temp); return -RT_EFULL; } while (mb->entry == mb->size) { } mb->msg_pool[mb->in_offset] = value; ++ mb->in_offset; if (mb->in_offset >= mb->size) mb->in_offset = 0 ; if (mb->entry < RT_MB_ENTRY_MAX) { mb->entry ++; } else { rt_hw_interrupt_enable(temp); return -RT_EFULL; } return RT_EOK; }
信号 信号其实也就是软中断,在原理上,一个线程收到一个信号与处理器收到一个中断请求可以说是类似的。
example 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 void thread1_signal_handler (int sig) { rt_kprintf("thread1 received signal %d\n" , sig); } static void thread1_entry (void *parameter) { int cnt = 0 ; rt_signal_install(SIGUSR1, thread1_signal_handler); rt_signal_unmask(SIGUSR1); while (cnt < 10 ) { rt_kprintf("thread1 count : %d\n" , cnt); cnt++; rt_thread_mdelay(100 ); } } int signal_sample (void ) { tid1 = rt_thread_create("thread1" , thread1_entry, RT_NULL, THREAD_STACK_SIZE, THREAD_PRIORITY, THREAD_TIMESLICE); if (tid1 != RT_NULL) rt_thread_startup(tid1); rt_thread_mdelay(300 ); rt_thread_kill(tid1, SIGUSR1); return 0 ; }
信号就是注册了一个回调函数在线程上,我们可以直接对着线程发送指令。
定时器 我们从上面已经看到了绝大多数的时候,都会非常依赖于 Timer
的功能。而 Timer
本身也是有硬件和软件两种模式,RT
提供的是软时钟的模式。 软件的定时器也提供了 HARD_TIMER
SOFT_TIMER
两种模式,区别在于 HARD_TIMER
以中断的行为触发, SOFT_TIMER
是一个独立线程进行调度。
默认的 HARD_TIMER
模式下,将定时任务按照一个 Link List
储存起来,当节拍器触发到相对应的时候,就可以运行这个任务了。回忆一下,我们最初的 Clock
章节
clock.c link 1 2 3 4 5 void rt_tick_increase (void ) { rt_timer_check(); }
最后一步就是我们的 Timer
检查。
timer.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 void rt_timer_check (void ) { current_tick = rt_tick_get(); while (!rt_list_isempty(&rt_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1 ])) { t = rt_list_entry(rt_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1 ].next, struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1 ]); if ((current_tick - t->timeout_tick) < RT_TICK_MAX / 2 ) { rt_list_insert_after(&list , &(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1 ])); t->timeout_func(t->parameter); current_tick = rt_tick_get(); } else break ; } }
到这里整个系统雏形已经完成,我们分析了整个内核的工作,下面我们去看看比较无聊但是很重要的 IO 设备。
网络驱动 网卡概念 RT-Thread 系统中目前支持三种协议栈类型: lwIP 协议栈、AT Socket 协议栈、WIZnet TCP/IP硬件协议栈。每种协议栈对应一种协议簇类型(family),上述协议栈分别对应的协议簇类型为:AF_INET、AF_AT、AF_WIZ。
Net 设备 以网络设备为例,有如下定义
netdev.h link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 struct netdev { rt_slist_t list ; char name[RT_NAME_MAX]; ip_addr_t ip_addr; ip_addr_t netmask; ip_addr_t gw; ip_addr_t dns_servers[NETDEV_DNS_SERVERS_NUM]; uint8_t hwaddr_len; uint8_t hwaddr[NETDEV_HWADDR_MAX_LEN]; uint16_t flags; uint16_t mtu; const struct netdev_ops *ops ; netdev_callback_fn status_callback; netdev_callback_fn addr_callback; void *user_data; };
在系统启动完成之后,会调用 netdev_register
进行系统的注册,而在这个函数中,会调用 device_init
进行设备注册。设备注册部分的逻辑在 ethernetif.c
中,相对固定不做展开,我们来看看数据的读写
网络读写 对于 RT
也提供了一个 Socket
抽象叫 SAL
sal_socket.c link 1 2 3 4 5 6 7 8 9 10 struct sal_socket { uint32_t magic; int socket; int domain; int type; int protocol; struct netdev *netdev ; void *user_data; };
我们可以发现,比较核心的就是 socket
会对应着一个 protocol
type
netdev
这样我们就知道我们的数据从什么网卡出去和进来了。
对于数据的读取如下
1 2 3 4 5 6 int sal_recvfrom (int socket, void *mem, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen) { struct sal_socket *sock ; struct sal_proto_family *pf ; return pf->skt_ops->recvfrom((int ) sock->user_data, mem, len, flags, from, fromlen); }
根据 pf
协议的不同,我们用不同的方式读取。我们从参数中就可以发现读取的核心处理对象,从 mem
将数据拷贝到 user_data
。一顿搜索一下,我们就自然找到了这个。
socket.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ssize_t lwip_recvfrom (int s, void *mem, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen) { sock = get_socket(s); if (!sock) { return -1 ; } if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { err = netconn_recv_tcp_pbuf(sock->conn, (struct pbuf **)&buf); } else { err = netconn_recv(sock->conn, (struct netbuf **)&buf); } }
我们可以看到读取的过程是 netconn_recv_tcp_pbuf
进行操作的,对于 LWIP
的逻辑,我们后续再讨论,这里先仔细讨论一下网卡的数据和内核之间的交流。
网卡驱动
现在的网卡普遍已经不再使用 中断
来通知操作系统获取数据了,因为频繁的中断会带来大量的上下文切换开销。因此都是 NAPI
模式,也就是网卡自己处理数据从 TX/RX 中,不过在 RT
中还是使用 中断
来处理了。 比如在网卡读取数据时
drv_smc911x.c link 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 struct pbuf *smc911x_emac_rx (rt_device_t dev) { struct pbuf * p = RT_NULL; struct eth_device_smc911x *emac ; emac = SMC911X_EMAC_DEVICE(dev); RT_ASSERT(emac != RT_NULL); if ((smc911x_reg_read(emac, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED) >> 16 ) { uint32_t status; uint32_t pktlen, tmplen; status = smc911x_reg_read(emac, RX_STATUS_FIFO); pktlen = (status & RX_STS_PKT_LEN) >> 16 ; smc911x_reg_write(emac, RX_CFG, 0 ); tmplen = (pktlen + 3 ) / 4 ; p = pbuf_alloc(PBUF_RAW, tmplen * 4 , PBUF_RAM); if (p) { uint32_t *data = (uint32_t *)p->payload; while (tmplen--) { *data++ = smc911x_reg_read(emac, RX_DATA_FIFO); } } } return p; }
在 init
时候
smc911x_emac_init 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 static rt_err_t smc911x_emac_init (rt_device_t dev) { smc911x_enable(emac); smc911x_reg_write(emac, FIFO_INT, 0x01 << 8 ); smc911x_reg_write(emac, INT_EN, INT_EN_RDFL_EN | INT_EN_RSFL_EN); smc911x_reg_write(emac, INT_CFG, INT_CFG_IRQ_EN | INT_CFG_IRQ_POL | INT_CFG_IRQ_TYPE); rt_hw_interrupt_install(emac->irqno, smc911x_isr, emac, "smc911x" ); rt_hw_interrupt_umask(emac->irqno); return RT_EOK; }