Linux mmap source code analysis (based on kernel 3.10.0-514)

Posted by 小树学习驿站



I have recently been going through the mmap code path; below is a brief walkthrough.
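Before diving into the kernel side, it helps to recall what the user-space side looks like. A minimal sketch that maps a file read-only (the file name is just an illustrative choice):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */
        struct stat st;

        if (fd < 0 || fstat(fd, &st) < 0)
                return 1;

        /* this call enters the kernel through sys_mmap below */
        char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        fwrite(p, 1, st.st_size, stdout);   /* pages fault in on access */
        munmap(p, st.st_size);
        close(fd);
        return 0;
}

The kernel entry point on x86-64 (arch/x86/kernel/sys_x86_64.c) first checks that the file offset is page-aligned: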

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)   /* the file offset must be page-aligned */
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
{
        struct file *file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {         /* file-backed mapping */
                audit_mmap_fd(fd, flags);
                if (unlikely(flags & MAP_HUGETLB))
                        return -EINVAL;
                file = fget(fd);                /* look up the file struct for fd */
                if (!file)
                        goto out;
                if (is_file_hugepages(file))    /* hugetlbfs file: round len up to the huge page size */
                        len = ALIGN(len, huge_page_size(hstate_file(file)));
        } else if (flags & MAP_HUGETLB) {
                struct user_struct *user = NULL;
                struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
                                                   SHM_HUGE_MASK);

                if (!hs)
                        return -EINVAL;

                len = ALIGN(len, huge_page_size(hs));
                /*
                 * VM_NORESERVE is used because the reservations will be
                 * taken when vm_ops->mmap() is called
                 * A dummy user value is used because we are not locking
                 * memory so no accounting is necessary
                 */
                file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
                                VM_NORESERVE,
                                &user, HUGETLB_ANONHUGE_INODE,
                                (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
out:
        return retval;
}
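When MAP_HUGETLB is passed for an anonymous mapping, mmap_pgoff wraps the region in a hugetlbfs pseudo-file via hugetlb_file_setup. A user-space sketch of that branch, assuming huge pages have been reserved (e.g. vm.nr_hugepages > 0):

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000     /* x86 value, for older headers */
#endif

int main(void)
{
        /* One 2 MiB huge page; takes the MAP_HUGETLB branch of
         * mmap_pgoff (hugetlb_file_setup). */
        size_t len = 2UL << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB)");    /* ENOMEM if no huge pages are reserved */
                return 1;
        }
        printf("huge mapping at %p\n", p);
        munmap(p, len);
        return 0;
}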
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;

        /* security_* hooks are the LSM (e.g. SELinux) entry points; with no
         * security module enabled they do nothing, so they can be ignored here */
        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}
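Note the populate out-parameter: when the mapping is VM_LOCKED, or MAP_POPULATE is passed without MAP_NONBLOCK, do_mmap_pgoff sets it to the mapping length and mm_populate then prefaults the whole range after mmap_sem is dropped. A minimal user-space sketch of the effect:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16UL << 20;        /* 16 MiB */

        /* MAP_POPULATE asks the kernel to prefault the whole range up
         * front (mm_populate), so first access takes no page fault. */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }
        p[0] = 1;       /* already backed by a page, no minor fault */
        munmap(p, len);
        return 0;
}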
unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff,
                        unsigned long *populate)
{
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;

        *populate = 0;

        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we dont add PROT_EXEC.)
         */
        /* If the process has the READ_IMPLIES_EXEC personality and the
         * filesystem is executable, PROT_READ implicitly gains PROT_EXEC. */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;

        if (!len)
                return -EINVAL;

        /* not MAP_FIXED: page-align the hint and make sure it is not
         * below mmap_min_addr */
        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);

        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)      /* pgoff + page count must not wrap */
                return -EOVERFLOW;

        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)       /* vm.max_map_count, default 65530 */
                return -ENOMEM;

        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        /* find a free region of the address space for addr/len */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)  /* not page-aligned: it is an error code, return it */
                return addr;

        /* Do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;

        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = len >> PAGE_SHIFT;
                locked += mm->locked_vm;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }

        inode = file ? file_inode(file) : NULL;

        if (file) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure we don't allow writing to an append-only
                         * file..
                         */
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure there are no mandatory locks on the file.
                         */
                        if (locks_verify_locked(inode))
                                return -EAGAIN;

                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                                if (vm_flags & VM_EXEC)
                                        return -EPERM;
                                vm_flags &= ~VM_MAYEXEC;
                        }

                        if (!file->f_op || !file->f_op->mmap)
                                return -ENODEV;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        /*
                         * Ignore pgoff.
                         */
                        pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
                        /*
                         * Set pgoff according to addr for anon_vma.
                         */
                        pgoff = addr >> PAGE_SHIFT;
                        break;
                default:
                        return -EINVAL;
                }
        }
        /* As the code above shows, the mapping types mainly differ in the
         * vm_flags they end up with. */

        /*
         * Set 'VM_NORESERVE' if we should not account for the
         * memory use of this mapping.
         */
        if (flags & MAP_NORESERVE) {
                /* We honor MAP_NORESERVE if allowed to overcommit */
                if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        vm_flags |= VM_NORESERVE;

                /* hugetlb applies strict overcommit unless MAP_NORESERVE */
                if (file && is_file_hugepages(file))
                        vm_flags |= VM_NORESERVE;
        }

        /* do the actual mapping */
        addr = mmap_region(file, addr, len, vm_flags, pgoff);
        if (!IS_ERR_VALUE(addr) &&
            ((vm_flags & VM_LOCKED) ||
             (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                *populate = len;
        return addr;
}
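The early sanity checks are easy to trigger from user space. A small sketch: both calls are expected to fail with EINVAL, the first from the !len check in do_mmap_pgoff, the second from the off & ~PAGE_MASK check in the syscall wrapper, which runs before the fd is even looked at:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        void *p;

        /* len == 0 is rejected by the !len check in do_mmap_pgoff */
        p = mmap(NULL, 0, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        printf("len=0:         %s\n", p == MAP_FAILED ? strerror(errno) : "ok");

        /* a non-page-aligned file offset is rejected by the syscall
         * wrapper before fget(), so any fd triggers it */
        p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, 0, 1);
        printf("unaligned off: %s\n", p == MAP_FAILED ? strerror(errno) : "ok");
        return 0;
}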

Choosing the address-space layout:

The selection between the classic (legacy, bottom-up) layout and the default top-down layout is done by arch_pick_mmap_layout in arch/x86/mm/mmap.c.

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_legacy_base = mmap_legacy_base();
        mm->mmap_base = mmap_base();

        if (mmap_is_legacy()) {
                /* legacy layout: new mappings grow from low to high addresses */
                mm->mmap_base = mm->mmap_legacy_base;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                /* default layout: new mappings grow from high to low addresses */
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
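On x86, mmap_is_legacy returns true when the ADDR_COMPAT_LAYOUT personality is set, when RLIMIT_STACK is unlimited, or when the vm.legacy_va_layout sysctl is enabled. A quick way to observe the default top-down behavior (addresses are illustrative and vary with ASLR):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* Under the default top-down layout, each new mapping is
         * typically placed below the previous one. */
        for (int i = 0; i < 4; i++) {
                void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                printf("mapping %d at %p\n", i, p);
        }
        return 0;
}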

Next, look at get_unmapped_area:

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        unsigned long (*get_area)(struct file *, unsigned long,
                                  unsigned long, unsigned long, unsigned long);

        unsigned long error = arch_mmap_check(addr, len, flags);
        if (error)
                return error;

        /* Careful about overflows.. */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* Pick the allocator depending on whether this is a file-backed
         * or an anonymous mapping. */
        get_area = current->mm->get_unmapped_area;
        if (file && file->f_op && file->f_op->get_unmapped_area)
                get_area = file->f_op->get_unmapped_area;
        /*
         * If the file does not provide its own get_unmapped_area,
         * current->mm->get_unmapped_area is used, i.e.
         * arch_get_unmapped_area or arch_get_unmapped_area_topdown.
         */
        addr = get_area(file, addr, len, pgoff, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        if (addr > TASK_SIZE - len)
                return -ENOMEM;
        if (addr & ~PAGE_MASK)  /* not page-aligned: return -EINVAL */
                return -EINVAL;

        addr = arch_rebalance_pgtables(addr, len);
        error = security_mmap_addr(addr);
        return error ? error : addr;
}

Now look at arch_get_unmapped_area_topdown, in arch/x86/kernel/sys_x86_64.c:

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        /* struct vm_unmapped_area_info describes the allocation request */
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* MAP_FIXED: the mapping must be created at the given address */
        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                /* find_vma returns the first vma with addr < vma->vm_end */
                vma = find_vma(mm, addr);
                /*
                 * The hint can be used directly when:
                 * 1. the requested range fits below TASK_SIZE, and
                 * 2. no vma covers it, or
                 * 3. the new range ends before the next vma starts.
                 */
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;  /* search from high to low addresses */
        info.length = len;
        info.low_limit = PAGE_SIZE;             /* the range must not start below this */
        info.high_limit = mm->mmap_base;        /* ...and must end below this */
        info.align_mask = 0;                    /* alignment mask for the start address */
        info.align_offset = pgoff << PAGE_SHIFT;/* alignment offset */
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        addr = vm_unmapped_area(&info);
        /* a page-aligned result is a success; otherwise fall back to the
         * bottom-up (legacy) search */
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
        if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
                return unmapped_area(info);
        else
                return unmapped_area_topdown(info);     /* the default path */
}

To make the red-black tree search efficient, unmapped_area_topdown relies on the rb_subtree_gap field.

It is worth being clear about what rb_subtree_gap in struct vm_area_struct means; there is an explanation at http://patchwork.ozlabs.org/patch/197340/ :

rb_subtree_gap is the maximum of the gap between the current node and its predecessor, and the largest gap between nodes anywhere in its left and right subtrees.
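In other words, each node caches the largest free gap in its subtree, so whole subtrees can be skipped during the search. The kernel maintains the field with a helper in mm/mmap.c; the 3.10 version looks roughly like this:

static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
        unsigned long max, subtree_gap;

        /* gap between this vma and its predecessor in the address space */
        max = vma->vm_start;
        if (vma->vm_prev)
                max -= vma->vm_prev->vm_end;

        /* take the maximum with the cached gaps of both subtrees */
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        if (vma->vm_rb.rb_right) {
                subtree_gap = rb_entry(vma->vm_rb.rb_right,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        return max;
}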

unmapped_area_topdown searches the right subtree first (higher addresses), then checks the current node, and finally descends into the left subtree:

unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long length, low_limit, high_limit, gap_start, gap_end;

        /* Adjust search length to account for worst case alignment overhead */
        length = info->length + info->align_mask;
        if (length < info->length)
                return -ENOMEM;

        /*
         * Adjust search limits by the desired length.
         * See implementation comment at top of unmapped_area().
         */
        gap_end = info->high_limit;
        if (gap_end < length)
                return -ENOMEM;
        high_limit = gap_end - length;

        if (info->low_limit > high_limit)
                return -ENOMEM;
        low_limit = info->low_limit + length;

        /* Check highest gap, which does not precede any rbtree node */
        gap_start = mm->highest_vm_end;
        if (gap_start <= high_limit)
                goto found_highest;

        /* Check if rbtree root looks promising */
        if (RB_EMPTY_ROOT(&mm->mm_rb))
                return -ENOMEM;
        /* If even the root's rb_subtree_gap cannot satisfy the request,
         * no gap in the whole tree can: return -ENOMEM right away. */
        vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
        if (vma->rb_subtree_gap < length)
                return -ENOMEM;

        while (true) {
                /* Visit right subtree if it looks promising */
                gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
                        struct vm_area_struct *right =
                                rb_entry(vma->vm_rb.rb_right,
                                         struct vm_area_struct, vm_rb);
                        if (right->rb_subtree_gap >= length) {
                                vma = right;
                                continue;
                        }
                }

check_current:
                /* Check if current node has a suitable gap */
                gap_end = vma->vm_start;
                if (gap_end < low_limit)
                        return -ENOMEM;
                if (gap_start <= high_limit && gap_end - gap_start >= length)
                        goto found;

                /* Visit left subtree if it looks promising */
                if (vma->vm_rb.rb_left) {
                        struct vm_area_struct *left =
                                rb_entry(vma->vm_rb.rb_left,
                                         struct vm_area_struct, vm_rb);
                        if (left->rb_subtree_gap >= length) {
                                vma = left;
                                continue;
                        }
                }

                /* Go back up the rbtree to find next candidate node */
                while (true) {
                        struct rb_node *prev = &vma->vm_rb;
                        if (!rb_parent(prev))
                                return -ENOMEM;
                        vma = rb_entry(rb_parent(prev),
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_right) {
                                gap_start = vma->vm_prev ?
                                        vma->vm_prev->vm_end : 0;
                                goto check_current;
                        }
                }
        }

found:
        /* We found a suitable gap. Clip it with the original high_limit. */
        if (gap_end > info->high_limit)
                gap_end = info->high_limit;

found_highest:
        /* Compute highest gap address at the desired alignment */
        gap_end -= info->length;
        gap_end -= (gap_end - info->align_offset) & info->align_mask;

        VM_BUG_ON(gap_end < info->low_limit);
        VM_BUG_ON(gap_end < gap_start);
        return gap_end;
}
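The final two subtractions place the mapping as high as possible in the gap while keeping (addr - align_offset) a multiple of the alignment. A quick worked sketch of the arithmetic, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values: a gap ending at 0x7f0000003800, a 2-page
         * request, 4 KiB pages, and a file page offset of 3. */
        unsigned long gap_end      = 0x7f0000003800UL;
        unsigned long length       = 2 * 4096;  /* info->length */
        unsigned long align_mask   = 4096 - 1;  /* info->align_mask */
        unsigned long align_offset = 3UL << 12; /* info->align_offset */

        gap_end -= length;                                /* make room for the mapping */
        gap_end -= (gap_end - align_offset) & align_mask; /* round down to alignment */

        /* (gap_end - align_offset) is now a multiple of 4096, so the
         * mapping is aligned consistently with its file offset;
         * prints addr = 0x7f0000001000 */
        printf("addr = %#lx\n", gap_end);
        return 0;
}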

Next, mmap_region. Its main steps are: 1. check whether the mapping is allowed at all; 2. create a new VMA or extend an existing one; 3. set the flags and fill in the vma fields; 4. link the vma into the process's rbtree and vma list.

unsigned long mmap_region(struct file *file, unsigned long addr,
                unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        int error;
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;

        /* Check against address space limit. */
        /* would this mapping push the process past its limit? */
        if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
                unsigned long nr_pages;

                /*
                 * MAP_FIXED may remove pages of mappings that intersects with
                 * requested mapping. Account for the pages it would unmap.
                 */
                if (!(vm_flags & MAP_FIXED))
                        return -ENOMEM;

                nr_pages = count_vma_pages_range(mm, addr, addr + len);

                if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
                        return -ENOMEM;
        }

        /* Clear old maps */
        error = -ENOMEM;
munmap_back:
        /* unmap anything that overlaps the requested range first */
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                goto munmap_back;
        }

        /*
         * Private writable mapping: check memory availability
         */
        if (accountable_mapping(file, vm_flags)) {
                charged = len >> PAGE_SHIFT;
                if (security_vm_enough_memory_mm(mm, charged))
                        return -ENOMEM;
                vm_flags |= VM_ACCOUNT;
        }

        /*
         * Can we just expand an old mapping?
         */
        /* if the range can be merged with an existing VMA, reuse that vma */
        vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file,
                        pgoff, NULL, NULL_VM_UFFD_CTX);
        if (vma)
                goto out;

        /*
         * Determine the object being mapped and call the appropriate
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
        /* otherwise allocate a fresh vma for the mapping */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
        INIT_LIST_HEAD(&vma->anon_vma_chain);

        error = -EINVAL;        /* when rejecting VM_GROWSDOWN|VM_GROWSUP */

        if (file) {             /* file-backed mapping */
                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                        goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
                        error = deny_write_access(file);
                        if (error)
                                goto free_vma;
                }
                if (vm_flags & VM_SHARED) {
                        error = mapping_map_writable(file->f_mapping);
                        if (error)
                                goto allow_write_and_free_vma;
                }

                /* ->mmap() can change vma->vm_file, but must guarantee that
                 * vma_link() below can deny write-access if VM_DENYWRITE is set
                 * and map writably if VM_SHARED is set. This usually means the
                 * new file must not have been exposed to user-space, yet.
                 */
                vma->vm_file = get_file(file);
                error = file->f_op->mmap(file, vma);    /* the filesystem's own mmap hook */
                if (error)
                        goto unmap_and_free_vma;

                /* Can addr have changed??
                 *
                 * Answer: Yes, several device drivers can do it in their
                 *         f_op->mmap method. -DaveM
                 * Bug: If addr is changed, prev, rb_link, rb_parent should
                 *      be updated for vma_link()
                 */
                WARN_ON_ONCE(addr != vma->vm_start);

                /* f_op->mmap may have changed some mapping parameters;
                 * re-read them here, before vma_link() */
                addr = vma->vm_start;
                pgoff = vma->vm_pgoff;
                vm_flags = vma->vm_flags;
        } else if (vm_flags & VM_SHARED) {      /* shared anonymous mapping */
                if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
                        goto free_vma;
                /* back the vma with a shmem "dev/zero" file; this is an
                 * elegant trick: pages are zero-filled on fault, so nothing
                 * has to be cleared up front */
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
        }

        /* link the vma back into the process's mm */
        vma_link(mm, vma, prev, rb_link, rb_parent);
        /* Once vma denies write, undo our temporary denial count */
        if (file) {
                if (vm_flags & VM_SHARED)
                        mapping_unmap_writable(file->f_mapping);
                if (vm_flags & VM_DENYWRITE)
                        allow_write_access(file);
        }
        file = vma->vm_file;
out:
        /* perf hooks a mmap event here */
        perf_event_mmap(vma);

        /* per-process memory statistics, compiled in with /proc support */
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
                                        vma == get_gate_vma(current->mm)))
                        mm->locked_vm += (len >> PAGE_SHIFT);
                else
                        vma->vm_flags &= ~VM_LOCKED;
        }

        if (file)
                uprobe_mmap(vma);

        /*
         * New (or expanded) vma always get soft dirty status.
         * Otherwise user-space soft-dirty page tracker won't
         * be able to distinguish situation when vma area unmapped,
         * then new mapped in-place (which must be aimed as
         * a completely new data area).
         */
        vma->vm_flags |= VM_SOFTDIRTY;

        vma_set_page_prot(vma);

        return addr;

unmap_and_free_vma:
        vma->vm_file = NULL;
        fput(file);

        /* Undo any partial mapping done by a device driver. */
        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
        charged = 0;
        if (vm_flags & VM_SHARED)
                mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
        if (vm_flags & VM_DENYWRITE)
                allow_write_access(file);
free_vma:
        kmem_cache_free(vm_area_cachep, vma);
unacct_error:
        if (charged)
                vm_unacct_memory(charged);
        return error;
}
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        file = shmem_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
        return 0;
}
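A shared anonymous mapping set up this way is the standard way for related processes to share memory across fork. A minimal sketch:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        /* MAP_SHARED | MAP_ANONYMOUS goes through shmem_zero_setup():
         * the pages are shared with children forked after the mmap. */
        int *counter = mmap(NULL, sizeof(*counter), PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (counter == MAP_FAILED)
                return 1;

        *counter = 0;
        if (fork() == 0) {              /* child writes... */
                *counter = 42;
                _exit(0);
        }
        wait(NULL);                     /* ...and the parent sees the write */
        printf("counter = %d\n", *counter);
        munmap(counter, sizeof(*counter));
        return 0;
}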

