From 6ec462bbf1c4d7215dda250e8c12d0c64a872530 Mon Sep 17 00:00:00 2001 From: "Wang, Liang" Date: Tue, 17 Sep 2019 19:50:03 +0800 Subject: [PATCH 1/8] Initial commit for CIC. Tracked-On: Signed-off-by: Wang, Liang --- ashmem/Makefile | 14 + ashmem/ashmem.c | 905 +++++ ashmem/ashmem.h | 27 + ashmem/deps.c | 11 + ashmem/dkms.conf | 7 + ashmem/uapi/ashmem.h | 47 + binder/Makefile | 17 + binder/binder.c | 5643 +++++++++++++++++++++++++++++++ binder/binder_alloc.c | 1016 ++++++ binder/binder_alloc.h | 186 + binder/binder_ctl.h | 35 + binder/binder_internal.h | 49 + binder/binder_trace.h | 387 +++ binder/binderfs.c | 550 +++ binder/deps.c | 161 + mac80211_hwsim/Makefile | 6 + mac80211_hwsim/mac80211_hwsim.c | 3590 ++++++++++++++++++++ mac80211_hwsim/mac80211_hwsim.h | 174 + 18 files changed, 12825 insertions(+) create mode 100644 ashmem/Makefile create mode 100644 ashmem/ashmem.c create mode 100644 ashmem/ashmem.h create mode 100644 ashmem/deps.c create mode 100644 ashmem/dkms.conf create mode 100644 ashmem/uapi/ashmem.h create mode 100644 binder/Makefile create mode 100644 binder/binder.c create mode 100644 binder/binder_alloc.c create mode 100644 binder/binder_alloc.h create mode 100644 binder/binder_ctl.h create mode 100644 binder/binder_internal.h create mode 100644 binder/binder_trace.h create mode 100644 binder/binderfs.c create mode 100644 binder/deps.c create mode 100644 mac80211_hwsim/Makefile create mode 100644 mac80211_hwsim/mac80211_hwsim.c create mode 100644 mac80211_hwsim/mac80211_hwsim.h diff --git a/ashmem/Makefile b/ashmem/Makefile new file mode 100644 index 0000000..1193713 --- /dev/null +++ b/ashmem/Makefile @@ -0,0 +1,14 @@ +ccflags-y += -I$(src) -Wno-error=implicit-int -Wno-int-conversion +obj-m := ashmem_module.o +ashmem_module-y := deps.o ashmem.o + +KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build + +all: + $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD + +install: + cp ashmem_module.ko $(DESTDIR)/ + +clean: + rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions diff --git a/ashmem/ashmem.c b/ashmem/ashmem.c new file mode 100644 index 0000000..b64c646 --- /dev/null +++ b/ashmem/ashmem.c @@ -0,0 +1,905 @@ +/* mm/ashmem.c + * + * Anonymous Shared Memory Subsystem, ashmem + * + * Copyright (C) 2008 Google, Inc. + * + * Robert Love + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "ashmem: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ashmem.h" + +#define ASHMEM_NAME_PREFIX "dev/ashmem/" +#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) +#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) + +/** + * struct ashmem_area - The anonymous shared memory area + * @name: The optional name in /proc/pid/maps + * @unpinned_list: The list of all ashmem areas + * @file: The shmem-based backing file + * @size: The size of the mapping, in bytes + * @prot_mask: The allowed protection bits, as vm_flags + * + * The lifecycle of this structure is from our parent file's open() until + * its release(). It is also protected by 'ashmem_mutex' + * + * Warning: Mappings do NOT pin this structure; It dies on close() + */ +struct ashmem_area { + char name[ASHMEM_FULL_NAME_LEN]; + struct list_head unpinned_list; + struct file *file; + size_t size; + unsigned long prot_mask; +}; + +/** + * struct ashmem_range - A range of unpinned/evictable pages + * @lru: The entry in the LRU list + * @unpinned: The entry in its area's unpinned list + * @asma: The associated anonymous shared memory area. + * @pgstart: The starting page (inclusive) + * @pgend: The ending page (inclusive) + * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED) + * + * The lifecycle of this structure is from unpin to pin. + * It is protected by 'ashmem_mutex' + */ +struct ashmem_range { + struct list_head lru; + struct list_head unpinned; + struct ashmem_area *asma; + size_t pgstart; + size_t pgend; + unsigned int purged; +}; + +/* LRU list of unpinned pages, protected by ashmem_mutex */ +static LIST_HEAD(ashmem_lru_list); + +/* + * long lru_count - The count of pages on our LRU list. + * + * This is protected by ashmem_mutex. + */ +static unsigned long lru_count; + +/* + * ashmem_mutex - protects the list of and each individual ashmem_area + * + * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem + */ +static DEFINE_MUTEX(ashmem_mutex); + +static struct kmem_cache *ashmem_area_cachep __read_mostly; +static struct kmem_cache *ashmem_range_cachep __read_mostly; + +static inline unsigned long range_size(struct ashmem_range *range) +{ + return range->pgend - range->pgstart + 1; +} + +static inline bool range_on_lru(struct ashmem_range *range) +{ + return range->purged == ASHMEM_NOT_PURGED; +} + +static inline bool page_range_subsumes_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (range->pgstart >= start) && (range->pgend <= end); +} + +static inline bool page_range_subsumed_by_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (range->pgstart <= start) && (range->pgend >= end); +} + +static inline bool page_in_range(struct ashmem_range *range, size_t page) +{ + return (range->pgstart <= page) && (range->pgend >= page); +} + +static inline bool page_range_in_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return page_in_range(range, start) || page_in_range(range, end) || + page_range_subsumes_range(range, start, end); +} + +static inline bool range_before_page(struct ashmem_range *range, size_t page) +{ + return range->pgend < page; +} + +#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) + +/** + * lru_add() - Adds a range of memory to the LRU list + * @range: The memory range being added. + * + * The range is first added to the end (tail) of the LRU list. 
+ * After this, the size of the range is added to @lru_count + */ +static inline void lru_add(struct ashmem_range *range) +{ + list_add_tail(&range->lru, &ashmem_lru_list); + lru_count += range_size(range); +} + +/** + * lru_del() - Removes a range of memory from the LRU list + * @range: The memory range being removed + * + * The range is first deleted from the LRU list. + * After this, the size of the range is removed from @lru_count + */ +static inline void lru_del(struct ashmem_range *range) +{ + list_del(&range->lru); + lru_count -= range_size(range); +} + +/** + * range_alloc() - Allocates and initializes a new ashmem_range structure + * @asma: The associated ashmem_area + * @prev_range: The previous ashmem_range in the sorted asma->unpinned list + * @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) + * @start: The starting page (inclusive) + * @end: The ending page (inclusive) + * + * This function is protected by ashmem_mutex. + * + * Return: 0 if successful, or -ENOMEM if there is an error + */ +static int range_alloc(struct ashmem_area *asma, + struct ashmem_range *prev_range, unsigned int purged, + size_t start, size_t end) +{ + struct ashmem_range *range; + + range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); + if (unlikely(!range)) + return -ENOMEM; + + range->asma = asma; + range->pgstart = start; + range->pgend = end; + range->purged = purged; + + list_add_tail(&range->unpinned, &prev_range->unpinned); + + if (range_on_lru(range)) + lru_add(range); + + return 0; +} + +/** + * range_del() - Deletes and dealloctes an ashmem_range structure + * @range: The associated ashmem_range that has previously been allocated + */ +static void range_del(struct ashmem_range *range) +{ + list_del(&range->unpinned); + if (range_on_lru(range)) + lru_del(range); + kmem_cache_free(ashmem_range_cachep, range); +} + +/** + * range_shrink() - Shrinks an ashmem_range + * @range: The associated ashmem_range being shrunk + * @start: The starting byte of the new range + * @end: The ending byte of the new range + * + * This does not modify the data inside the existing range in any way - It + * simply shrinks the boundaries of the range. + * + * Theoretically, with a little tweaking, this could eventually be changed + * to range_resize, and expand the lru_count if the new range is larger. + */ +static inline void range_shrink(struct ashmem_range *range, + size_t start, size_t end) +{ + size_t pre = range_size(range); + + range->pgstart = start; + range->pgend = end; + + if (range_on_lru(range)) + lru_count -= pre - range_size(range); +} + +/** + * ashmem_open() - Opens an Anonymous Shared Memory structure + * @inode: The backing file's index node(?) + * @file: The backing file + * + * Please note that the ashmem_area is not returned by this function - It is + * instead written to "file->private_data". + * + * Return: 0 if successful, or another code if unsuccessful. + */ +static int ashmem_open(struct inode *inode, struct file *file) +{ + struct ashmem_area *asma; + int ret; + + ret = generic_file_open(inode, file); + if (unlikely(ret)) + return ret; + + asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); + if (unlikely(!asma)) + return -ENOMEM; + + INIT_LIST_HEAD(&asma->unpinned_list); + memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); + asma->prot_mask = PROT_MASK; + file->private_data = asma; + + return 0; +} + +/** + * ashmem_release() - Releases an Anonymous Shared Memory structure + * @ignored: The backing file's Index Node(?) - It is ignored here. 
+ * @file: The backing file + * + * Return: 0 if successful. If it is anything else, go have a coffee and + * try again. + */ +static int ashmem_release(struct inode *ignored, struct file *file) +{ + struct ashmem_area *asma = file->private_data; + struct ashmem_range *range, *next; + + mutex_lock(&ashmem_mutex); + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) + range_del(range); + mutex_unlock(&ashmem_mutex); + + if (asma->file) + fput(asma->file); + kmem_cache_free(ashmem_area_cachep, asma); + + return 0; +} + +static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct ashmem_area *asma = iocb->ki_filp->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* If size is not set, or set to 0, always return EOF. */ + if (asma->size == 0) + goto out_unlock; + + if (!asma->file) { + ret = -EBADF; + goto out_unlock; + } + + /* + * asma and asma->file are used outside the lock here. We assume + * once asma->file is set it will never be changed, and will not + * be destroyed until all references to the file are dropped and + * ashmem_release is called. + */ + mutex_unlock(&ashmem_mutex); + ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0); + mutex_lock(&ashmem_mutex); + if (ret > 0) + asma->file->f_pos = iocb->ki_pos; +out_unlock: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) +{ + struct ashmem_area *asma = file->private_data; + int ret; + + mutex_lock(&ashmem_mutex); + + if (asma->size == 0) { + mutex_unlock(&ashmem_mutex); + return -EINVAL; + } + + if (!asma->file) { + mutex_unlock(&ashmem_mutex); + return -EBADF; + } + + mutex_unlock(&ashmem_mutex); + + ret = vfs_llseek(asma->file, offset, origin); + if (ret < 0) + return ret; + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; + return ret; +} + +static inline vm_flags_t calc_vm_may_flags(unsigned long prot) +{ + return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) | + _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); +} + +static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* user needs to SET_SIZE before mapping */ + if (unlikely(!asma->size)) { + ret = -EINVAL; + goto out; + } + + /* requested protection bits must match our allowed protection mask */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) & + calc_vm_prot_bits(PROT_MASK, 0))) { +#else + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & + calc_vm_prot_bits(PROT_MASK))) { +#endif + ret = -EPERM; + goto out; + } + vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); + + if (!asma->file) { + char *name = ASHMEM_NAME_DEF; + struct file *vmfile; + + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') + name = asma->name; + + /* ... and allocate the backing shmem file */ + vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); + if (IS_ERR(vmfile)) { + ret = PTR_ERR(vmfile); + goto out; + } + vmfile->f_mode |= FMODE_LSEEK; + asma->file = vmfile; + } + get_file(asma->file); + + /* + * XXX - Reworked to use shmem_zero_setup() instead of + * shmem_set_file while we're in staging. 
-jstultz + */ + if (vma->vm_flags & VM_SHARED) { + ret = shmem_zero_setup(vma); + if (ret) { + fput(asma->file); + goto out; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) + } else { + vma_set_anonymous(vma); +#endif + } + + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = asma->file; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +/* + * ashmem_shrink - our cache shrinker, called from mm/vmscan.c + * + * 'nr_to_scan' is the number of objects to scan for freeing. + * + * 'gfp_mask' is the mask of the allocation that got us into this mess. + * + * Return value is the number of objects freed or -1 if we cannot + * proceed without risk of deadlock (due to gfp_mask). + * + * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' + * pages freed. + */ +static unsigned long +ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + struct ashmem_range *range, *next; + unsigned long freed = 0; + + /* We might recurse into filesystem code, so bail out if necessary */ + if (!(sc->gfp_mask & __GFP_FS)) + return SHRINK_STOP; + + if (!mutex_trylock(&ashmem_mutex)) + return -1; + + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { + loff_t start = range->pgstart * PAGE_SIZE; + loff_t end = (range->pgend + 1) * PAGE_SIZE; + + vfs_fallocate(range->asma->file, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + start, end - start); + range->purged = ASHMEM_WAS_PURGED; + lru_del(range); + + freed += range_size(range); + if (--sc->nr_to_scan <= 0) + break; + } + mutex_unlock(&ashmem_mutex); + return freed; +} + +static unsigned long +ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ + /* + * note that lru_count is count of pages on the lru, not a count of + * objects on the list. This means the scan function needs to return the + * number of pages freed, not the number of objects scanned. + */ + return lru_count; +} + +static struct shrinker ashmem_shrinker = { + .count_objects = ashmem_shrink_count, + .scan_objects = ashmem_shrink_scan, + /* + * XXX (dchinner): I wish people would comment on why they need on + * significant changes to the default value here + */ + .seeks = DEFAULT_SEEKS * 4, +}; + +static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) +{ + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* the user can only remove, not add, protection bits */ + if (unlikely((asma->prot_mask & prot) != prot)) { + ret = -EINVAL; + goto out; + } + + /* does the application expect PROT_READ to imply PROT_EXEC? */ + if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) + prot |= PROT_EXEC; + + asma->prot_mask = prot; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static int set_name(struct ashmem_area *asma, void __user *name) +{ + int len; + int ret = 0; + char local_name[ASHMEM_NAME_LEN]; + + /* + * Holding the ashmem_mutex while doing a copy_from_user might cause + * an data abort which would try to access mmap_sem. If another + * thread has invoked ashmem_mmap then it will be holding the + * semaphore and will be waiting for ashmem_mutex, there by leading to + * deadlock. We'll release the mutex and take the name to a local + * variable that does not need protection and later copy the local + * variable to the structure member with lock held. 
+ */ + len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); + if (len < 0) + return len; + if (len == ASHMEM_NAME_LEN) + local_name[ASHMEM_NAME_LEN - 1] = '\0'; + mutex_lock(&ashmem_mutex); + /* cannot change an existing mapping's name */ + if (unlikely(asma->file)) + ret = -EINVAL; + else + strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); + + mutex_unlock(&ashmem_mutex); + return ret; +} + +static int get_name(struct ashmem_area *asma, void __user *name) +{ + int ret = 0; + size_t len; + /* + * Have a local variable to which we'll copy the content + * from asma with the lock held. Later we can copy this to the user + * space safely without holding any locks. So even if we proceed to + * wait for mmap_sem, it won't lead to deadlock. + */ + char local_name[ASHMEM_NAME_LEN]; + + mutex_lock(&ashmem_mutex); + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { + /* + * Copying only `len', instead of ASHMEM_NAME_LEN, bytes + * prevents us from revealing one user's stack to another. + */ + len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; + memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); + } else { + len = sizeof(ASHMEM_NAME_DEF); + memcpy(local_name, ASHMEM_NAME_DEF, len); + } + mutex_unlock(&ashmem_mutex); + + /* + * Now we are just copying from the stack variable to userland + * No lock held + */ + if (unlikely(copy_to_user(name, local_name, len))) + ret = -EFAULT; + return ret; +} + +/* + * ashmem_pin - pin the given ashmem region, returning whether it was + * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ + struct ashmem_range *range, *next; + int ret = ASHMEM_NOT_PURGED; + + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { + /* moved past last applicable page; we can short circuit */ + if (range_before_page(range, pgstart)) + break; + + /* + * The user can ask us to pin pages that span multiple ranges, + * or to pin pages that aren't even unpinned, so this is messy. + * + * Four cases: + * 1. The requested range subsumes an existing range, so we + * just remove the entire matching range. + * 2. The requested range overlaps the start of an existing + * range, so we just update that range. + * 3. The requested range overlaps the end of an existing + * range, so we just update that range. + * 4. The requested range punches a hole in an existing range, + * so we have to update one side of the range and then + * create a new range for the other side. + */ + if (page_range_in_range(range, pgstart, pgend)) { + ret |= range->purged; + + /* Case #1: Easy. Just nuke the whole thing. */ + if (page_range_subsumes_range(range, pgstart, pgend)) { + range_del(range); + continue; + } + + /* Case #2: We overlap from the start, so adjust it */ + if (range->pgstart >= pgstart) { + range_shrink(range, pgend + 1, range->pgend); + continue; + } + + /* Case #3: We overlap from the rear, so adjust it */ + if (range->pgend <= pgend) { + range_shrink(range, range->pgstart, + pgstart - 1); + continue; + } + + /* + * Case #4: We eat a chunk out of the middle. A bit + * more complicated, we allocate a new range for the + * second half and adjust the first chunk's endpoint. + */ + range_alloc(asma, range, range->purged, + pgend + 1, range->pgend); + range_shrink(range, range->pgstart, pgstart - 1); + break; + } + } + + return ret; +} + +/* + * ashmem_unpin - unpin the given range of pages. Returns zero on success. 
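+ *
+ * Unpinned ranges that overlap the requested interval are coalesced into a
+ * single range, which inherits the purged status of every range it absorbs.
+ *
+ * For reference, userspace typically drives pin/unpin roughly as follows
+ * (an illustrative sketch against the uapi/ashmem.h ioctls, not part of
+ * this driver; regenerate_contents() stands in for whatever the client
+ * does to rebuild purged data):
+ *
+ *     size_t sz = 4 * getpagesize();
+ *     int fd = open("/dev/ashmem", O_RDWR);
+ *     struct ashmem_pin pin = { .offset = 0, .len = 0 };
+ *
+ *     ioctl(fd, ASHMEM_SET_SIZE, sz);
+ *     void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *     ioctl(fd, ASHMEM_UNPIN, &pin);
+ *     if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
+ *             regenerate_contents(p, sz);
+ *
+ * A zero .len means "from .offset to the end of the region", and ASHMEM_PIN
+ * reports whether the shrinker reclaimed the pages while they were unpinned.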
+ * + * Caller must hold ashmem_mutex. + */ +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ + struct ashmem_range *range, *next; + unsigned int purged = ASHMEM_NOT_PURGED; + +restart: + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { + /* short circuit: this is our insertion point */ + if (range_before_page(range, pgstart)) + break; + + /* + * The user can ask us to unpin pages that are already entirely + * or partially pinned. We handle those two cases here. + */ + if (page_range_subsumed_by_range(range, pgstart, pgend)) + return 0; + if (page_range_in_range(range, pgstart, pgend)) { + pgstart = min(range->pgstart, pgstart); + pgend = max(range->pgend, pgend); + purged |= range->purged; + range_del(range); + goto restart; + } + } + + return range_alloc(asma, range, purged, pgstart, pgend); +} + +/* + * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the + * given interval are unpinned and ASHMEM_IS_PINNED otherwise. + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, + size_t pgend) +{ + struct ashmem_range *range; + int ret = ASHMEM_IS_PINNED; + + list_for_each_entry(range, &asma->unpinned_list, unpinned) { + if (range_before_page(range, pgstart)) + break; + if (page_range_in_range(range, pgstart, pgend)) { + ret = ASHMEM_IS_UNPINNED; + break; + } + } + + return ret; +} + +static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, + void __user *p) +{ + struct ashmem_pin pin; + size_t pgstart, pgend; + int ret = -EINVAL; + + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) + return -EFAULT; + + mutex_lock(&ashmem_mutex); + + if (unlikely(!asma->file)) + goto out_unlock; + + /* per custom, you can pass zero for len to mean "everything onward" */ + if (!pin.len) + pin.len = PAGE_ALIGN(asma->size) - pin.offset; + + if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) + goto out_unlock; + + if (unlikely(((__u32)-1) - pin.offset < pin.len)) + goto out_unlock; + + if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) + goto out_unlock; + + pgstart = pin.offset / PAGE_SIZE; + pgend = pgstart + (pin.len / PAGE_SIZE) - 1; + + switch (cmd) { + case ASHMEM_PIN: + ret = ashmem_pin(asma, pgstart, pgend); + break; + case ASHMEM_UNPIN: + ret = ashmem_unpin(asma, pgstart, pgend); + break; + case ASHMEM_GET_PIN_STATUS: + ret = ashmem_get_pin_status(asma, pgstart, pgend); + break; + } + +out_unlock: + mutex_unlock(&ashmem_mutex); + + return ret; +} + +static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct ashmem_area *asma = file->private_data; + long ret = -ENOTTY; + + switch (cmd) { + case ASHMEM_SET_NAME: + ret = set_name(asma, (void __user *)arg); + break; + case ASHMEM_GET_NAME: + ret = get_name(asma, (void __user *)arg); + break; + case ASHMEM_SET_SIZE: + ret = -EINVAL; + mutex_lock(&ashmem_mutex); + if (!asma->file) { + ret = 0; + asma->size = (size_t)arg; + } + mutex_unlock(&ashmem_mutex); + break; + case ASHMEM_GET_SIZE: + ret = asma->size; + break; + case ASHMEM_SET_PROT_MASK: + ret = set_prot_mask(asma, arg); + break; + case ASHMEM_GET_PROT_MASK: + ret = asma->prot_mask; + break; + case ASHMEM_PIN: + case ASHMEM_UNPIN: + case ASHMEM_GET_PIN_STATUS: + ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg); + break; + case ASHMEM_PURGE_ALL_CACHES: + ret = -EPERM; + if (capable(CAP_SYS_ADMIN)) { + struct shrink_control sc = { + .gfp_mask = GFP_KERNEL, + .nr_to_scan = LONG_MAX, + }; + ret = 
ashmem_shrink_count(&ashmem_shrinker, &sc); + ashmem_shrink_scan(&ashmem_shrinker, &sc); + } + break; + } + + return ret; +} + +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case COMPAT_ASHMEM_SET_SIZE: + cmd = ASHMEM_SET_SIZE; + break; + case COMPAT_ASHMEM_SET_PROT_MASK: + cmd = ASHMEM_SET_PROT_MASK; + break; + } + return ashmem_ioctl(file, cmd, arg); +} +#endif + +static const struct file_operations ashmem_fops = { + .owner = THIS_MODULE, + .open = ashmem_open, + .release = ashmem_release, + .read_iter = ashmem_read_iter, + .llseek = ashmem_llseek, + .mmap = ashmem_mmap, + .unlocked_ioctl = ashmem_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_ashmem_ioctl, +#endif +}; + +static struct miscdevice ashmem_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ashmem", + .fops = &ashmem_fops, +}; + +static int __init ashmem_init(void) +{ + int ret = -ENOMEM; + + ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", + sizeof(struct ashmem_area), + 0, 0, NULL); + if (unlikely(!ashmem_area_cachep)) { + pr_err("failed to create slab cache\n"); + goto out; + } + + ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", + sizeof(struct ashmem_range), + 0, 0, NULL); + if (unlikely(!ashmem_range_cachep)) { + pr_err("failed to create slab cache\n"); + goto out_free1; + } + + ret = misc_register(&ashmem_misc); + if (unlikely(ret)) { + pr_err("failed to register misc device!\n"); + goto out_free2; + } + + register_shrinker(&ashmem_shrinker); + + pr_info("initialized\n"); + + return 0; + +out_free2: + kmem_cache_destroy(ashmem_range_cachep); +out_free1: + kmem_cache_destroy(ashmem_area_cachep); +out: + return ret; +} + +static void __exit ashmem_exit(void) +{ + unregister_shrinker(&ashmem_shrinker); + + misc_deregister(&ashmem_misc); + + kmem_cache_destroy(ashmem_range_cachep); + kmem_cache_destroy(ashmem_area_cachep); +} + +module_init(ashmem_init); +module_exit(ashmem_exit); + +MODULE_LICENSE("GPL"); diff --git a/ashmem/ashmem.h b/ashmem/ashmem.h new file mode 100644 index 0000000..5abcfd7 --- /dev/null +++ b/ashmem/ashmem.h @@ -0,0 +1,27 @@ +/* + * include/linux/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed. It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. 
+ */ + +#ifndef _LINUX_ASHMEM_H +#define _LINUX_ASHMEM_H + +#include +#include +#include + +#include "uapi/ashmem.h" + +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +#define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t) +#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int) +#endif + +#endif /* _LINUX_ASHMEM_H */ diff --git a/ashmem/deps.c b/ashmem/deps.c new file mode 100644 index 0000000..ce6016f --- /dev/null +++ b/ashmem/deps.c @@ -0,0 +1,11 @@ +#include +#include + +static int (*shmem_zero_setup_ptr)(struct vm_area_struct *) = NULL; + +int shmem_zero_setup(struct vm_area_struct *vma) +{ + if (!shmem_zero_setup_ptr) + shmem_zero_setup_ptr = kallsyms_lookup_name("shmem_zero_setup"); + return shmem_zero_setup_ptr(vma); +} diff --git a/ashmem/dkms.conf b/ashmem/dkms.conf new file mode 100644 index 0000000..715fa57 --- /dev/null +++ b/ashmem/dkms.conf @@ -0,0 +1,7 @@ +PACKAGE_NAME="anbox-ashmem" +PACKAGE_VERSION="1" +CLEAN="make clean" +MAKE[0]="make all KERNEL_SRC=/lib/modules/$kernelver/build" +BUILT_MODULE_NAME[0]="ashmem_linux" +DEST_MODULE_LOCATION[0]="/updates" +AUTOINSTALL="yes" diff --git a/ashmem/uapi/ashmem.h b/ashmem/uapi/ashmem.h new file mode 100644 index 0000000..ba4743c --- /dev/null +++ b/ashmem/uapi/ashmem.h @@ -0,0 +1,47 @@ +/* + * drivers/staging/android/uapi/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed. It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. + */ + +#ifndef _UAPI_LINUX_ASHMEM_H +#define _UAPI_LINUX_ASHMEM_H + +#include + +#define ASHMEM_NAME_LEN 256 + +#define ASHMEM_NAME_DEF "dev/ashmem" + +/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ +#define ASHMEM_NOT_PURGED 0 +#define ASHMEM_WAS_PURGED 1 + +/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
*/ +#define ASHMEM_IS_UNPINNED 0 +#define ASHMEM_IS_PINNED 1 + +struct ashmem_pin { + __u32 offset; /* offset into region, in bytes, page-aligned */ + __u32 len; /* length forward from offset, in bytes, page-aligned */ +}; + +#define __ASHMEMIOC 0x77 + +#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) +#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) +#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) +#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) +#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) +#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) +#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) +#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) +#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) +#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) + +#endif /* _UAPI_LINUX_ASHMEM_H */ diff --git a/binder/Makefile b/binder/Makefile new file mode 100644 index 0000000..b4ac7d8 --- /dev/null +++ b/binder/Makefile @@ -0,0 +1,17 @@ +ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"hostbinder,hostvndbinder,hosthwbinder\"" -DCONFIG_ANDROID_BINDERFS +obj-m := binderfs_module.o binder_module.o +binder_module-y := deps.o binder.o binder_alloc.o +binderfs_module-y := deps.o binderfs.o + +KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build + +all: + $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD + +install: + cp binder_module.ko $(DESTDIR)/ + cp binderfs_module.ko $(DESTDIR)/ + insmod binder_module.ko + insmod binderfs_module.ko +clean: + rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions *.ur-safe diff --git a/binder/binder.c b/binder/binder.c new file mode 100644 index 0000000..a79204f --- /dev/null +++ b/binder/binder.c @@ -0,0 +1,5643 @@ +/* binder.c + * + * Android IPC Subsystem + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Locking overview + * + * There are 3 main spinlocks which must be acquired in the + * order shown: + * + * 1) proc->outer_lock : protects binder_ref + * binder_proc_lock() and binder_proc_unlock() are + * used to acq/rel. + * 2) node->lock : protects most fields of binder_node. + * binder_node_lock() and binder_node_unlock() are + * used to acq/rel + * 3) proc->inner_lock : protects the thread and node lists + * (proc->threads, proc->waiting_threads, proc->nodes) + * and all todo lists associated with the binder_proc + * (proc->todo, thread->todo, proc->delivered_death and + * node->async_todo), as well as thread->transaction_stack + * binder_inner_proc_lock() and binder_inner_proc_unlock() + * are used to acq/rel + * + * Any lock under procA must never be nested under any lock at the same + * level or below on procB. 
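+ * For example, it is never valid to take procA->inner_lock while already
+ * holding procB->inner_lock.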
+ * + * Functions that require a lock held on entry indicate which lock + * in the suffix of the function name: + * + * foo_olocked() : requires node->outer_lock + * foo_nlocked() : requires node->lock + * foo_ilocked() : requires proc->inner_lock + * foo_oilocked(): requires proc->outer_lock and proc->inner_lock + * foo_nilocked(): requires node->lock and proc->inner_lock + * ... + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#include +#include +#endif + +#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT +#define BINDER_IPC_32BIT 1 +#endif + +#include +#include "binder_alloc.h" +#include "binder_internal.h" +#include "binder_trace.h" + +static HLIST_HEAD(binder_deferred_list); +static DEFINE_MUTEX(binder_deferred_lock); + +static HLIST_HEAD(binder_devices); +static HLIST_HEAD(binder_procs); +static DEFINE_MUTEX(binder_procs_lock); + +static HLIST_HEAD(binder_dead_nodes); +static DEFINE_SPINLOCK(binder_dead_nodes_lock); + +static struct dentry *binder_debugfs_dir_entry_root; +static struct dentry *binder_debugfs_dir_entry_proc; +static atomic_t binder_last_id; + +#define BINDER_DEBUG_ENTRY(name) \ +static int binder_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, binder_##name##_show, inode->i_private); \ +} \ +\ +static const struct file_operations binder_##name##_fops = { \ + .owner = THIS_MODULE, \ + .open = binder_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +static int binder_proc_show(struct seq_file *m, void *unused); +BINDER_DEBUG_ENTRY(proc); + +/* This is only defined in include/asm-arm/sizes.h */ +#ifndef SZ_1K +#define SZ_1K 0x400 +#endif + +#ifndef SZ_4M +#define SZ_4M 0x400000 +#endif + +#define FORBIDDEN_MMAP_FLAGS (VM_WRITE) + +enum { + BINDER_DEBUG_USER_ERROR = 1U << 0, + BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, + BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, + BINDER_DEBUG_OPEN_CLOSE = 1U << 3, + BINDER_DEBUG_DEAD_BINDER = 1U << 4, + BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, + BINDER_DEBUG_READ_WRITE = 1U << 6, + BINDER_DEBUG_USER_REFS = 1U << 7, + BINDER_DEBUG_THREADS = 1U << 8, + BINDER_DEBUG_TRANSACTION = 1U << 9, + BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, + BINDER_DEBUG_FREE_BUFFER = 1U << 11, + BINDER_DEBUG_INTERNAL_REFS = 1U << 12, + BINDER_DEBUG_PRIORITY_CAP = 1U << 13, + BINDER_DEBUG_SPINLOCKS = 1U << 14, +}; +static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | + BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; +//yangbin tmp comments because it will cause crash when we insmod +//module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); + +static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; +module_param_named(devices, binder_devices_param, charp, 0444); + +static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); +static int binder_stop_on_user_error; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) +static int binder_set_stop_on_user_error(const char *val, + const struct kernel_param *kp) +#else +static int binder_set_stop_on_user_error(const char *val, + struct kernel_param *kp) +#endif +{ + int ret; + + ret = param_set_int(val, kp); + if (binder_stop_on_user_error < 2) + wake_up(&binder_user_error_wait); + return ret; +} 
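+
+/*
+ * The two parameters registered here ("devices" and "stop_on_user_error")
+ * appear under /sys/module/binder_module/parameters/ once the module is
+ * loaded (the module name comes from obj-m in the Makefile). Each name in
+ * "devices" (CONFIG_ANDROID_BINDER_DEVICES by default: hostbinder,
+ * hostvndbinder, hosthwbinder) is registered as a misc character device.
+ * A minimal userspace probe of one of those nodes, shown only as an
+ * illustrative sketch using the uapi binder header, could look like:
+ *
+ *     struct binder_version vers;
+ *     int fd = open("/dev/hostbinder", O_RDWR | O_CLOEXEC);
+ *
+ *     if (fd >= 0 && ioctl(fd, BINDER_VERSION, &vers) == 0)
+ *             printf("binder protocol version %d\n", vers.protocol_version);
+ */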
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error, + param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); + +#define binder_debug(mask, x...) \ + do { \ + if (binder_debug_mask & mask) \ + pr_info(x); \ + } while (0) + +#define binder_user_error(x...) \ + do { \ + if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ + pr_info(x); \ + if (binder_stop_on_user_error) \ + binder_stop_on_user_error = 2; \ + } while (0) + +#define to_flat_binder_object(hdr) \ + container_of(hdr, struct flat_binder_object, hdr) + +#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) + +#define to_binder_buffer_object(hdr) \ + container_of(hdr, struct binder_buffer_object, hdr) + +#define to_binder_fd_array_object(hdr) \ + container_of(hdr, struct binder_fd_array_object, hdr) + +enum binder_stat_types { + BINDER_STAT_PROC, + BINDER_STAT_THREAD, + BINDER_STAT_NODE, + BINDER_STAT_REF, + BINDER_STAT_DEATH, + BINDER_STAT_TRANSACTION, + BINDER_STAT_TRANSACTION_COMPLETE, + BINDER_STAT_COUNT +}; + +struct binder_stats { + atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; + atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; + atomic_t obj_created[BINDER_STAT_COUNT]; + atomic_t obj_deleted[BINDER_STAT_COUNT]; +}; + +static struct binder_stats binder_stats; + +static inline void binder_stats_deleted(enum binder_stat_types type) +{ + atomic_inc(&binder_stats.obj_deleted[type]); +} + +static inline void binder_stats_created(enum binder_stat_types type) +{ + atomic_inc(&binder_stats.obj_created[type]); +} + +struct binder_transaction_log_entry { + int debug_id; + int debug_id_done; + int call_type; + int from_proc; + int from_thread; + int target_handle; + int to_proc; + int to_thread; + int to_node; + int data_size; + int offsets_size; + int return_error_line; + uint32_t return_error; + uint32_t return_error_param; + const char *context_name; +}; +struct binder_transaction_log { + atomic_t cur; + bool full; + struct binder_transaction_log_entry entry[32]; +}; +static struct binder_transaction_log binder_transaction_log; +static struct binder_transaction_log binder_transaction_log_failed; + +static struct binder_transaction_log_entry *binder_transaction_log_add( + struct binder_transaction_log *log) +{ + struct binder_transaction_log_entry *e; + unsigned int cur = atomic_inc_return(&log->cur); + + if (cur >= ARRAY_SIZE(log->entry)) + log->full = 1; + e = &log->entry[cur % ARRAY_SIZE(log->entry)]; + WRITE_ONCE(e->debug_id_done, 0); + /* + * write-barrier to synchronize access to e->debug_id_done. + * We make sure the initialized 0 value is seen before + * memset() other fields are zeroed by memset. + */ + smp_wmb(); + memset(e, 0, sizeof(*e)); + return e; +} + +/** + * struct binder_work - work enqueued on a worklist + * @entry: node enqueued on list + * @type: type of work to be performed + * + * There are separate work lists for proc, thread, and node (async). 
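+ *
+ * A binder_work is always embedded in a larger object (binder_transaction,
+ * binder_node, binder_error or binder_ref_death); consumers use
+ * container_of() on the dequeued work, keyed on @type, to recover the
+ * containing object.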
+ */ +struct binder_work { + struct list_head entry; + + enum { + BINDER_WORK_TRANSACTION = 1, + BINDER_WORK_TRANSACTION_COMPLETE, + BINDER_WORK_RETURN_ERROR, + BINDER_WORK_NODE, + BINDER_WORK_DEAD_BINDER, + BINDER_WORK_DEAD_BINDER_AND_CLEAR, + BINDER_WORK_CLEAR_DEATH_NOTIFICATION, + } type; +}; + +struct binder_error { + struct binder_work work; + uint32_t cmd; +}; + +/** + * struct binder_node - binder node bookkeeping + * @debug_id: unique ID for debugging + * (invariant after initialized) + * @lock: lock for node fields + * @work: worklist element for node work + * (protected by @proc->inner_lock) + * @rb_node: element for proc->nodes tree + * (protected by @proc->inner_lock) + * @dead_node: element for binder_dead_nodes list + * (protected by binder_dead_nodes_lock) + * @proc: binder_proc that owns this node + * (invariant after initialized) + * @refs: list of references on this node + * (protected by @lock) + * @internal_strong_refs: used to take strong references when + * initiating a transaction + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @local_weak_refs: weak user refs from local process + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @local_strong_refs: strong user refs from local process + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @tmp_refs: temporary kernel refs + * (protected by @proc->inner_lock while @proc + * is valid, and by binder_dead_nodes_lock + * if @proc is NULL. During inc/dec and node release + * it is also protected by @lock to provide safety + * as the node dies and @proc becomes NULL) + * @ptr: userspace pointer for node + * (invariant, no lock needed) + * @cookie: userspace cookie for node + * (invariant, no lock needed) + * @has_strong_ref: userspace notified of strong ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @pending_strong_ref: userspace has acked notification of strong ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @has_weak_ref: userspace notified of weak ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @pending_weak_ref: userspace has acked notification of weak ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @has_async_transaction: async transaction to node in progress + * (protected by @lock) + * @accept_fds: file descriptor operations supported for node + * (invariant after initialized) + * @min_priority: minimum scheduling priority + * (invariant after initialized) + * @async_todo: list of async work items + * (protected by @proc->inner_lock) + * + * Bookkeeping structure for binder nodes. 
+ */ +struct binder_node { + int debug_id; + spinlock_t lock; + struct binder_work work; + union { + struct rb_node rb_node; + struct hlist_node dead_node; + }; + struct binder_proc *proc; + struct hlist_head refs; + int internal_strong_refs; + int local_weak_refs; + int local_strong_refs; + int tmp_refs; + binder_uintptr_t ptr; + binder_uintptr_t cookie; + struct { + /* + * bitfield elements protected by + * proc inner_lock + */ + u8 has_strong_ref:1; + u8 pending_strong_ref:1; + u8 has_weak_ref:1; + u8 pending_weak_ref:1; + }; + struct { + /* + * invariant after initialization + */ + u8 accept_fds:1; + u8 min_priority; + }; + bool has_async_transaction; + struct list_head async_todo; +}; + +struct binder_ref_death { + /** + * @work: worklist element for death notifications + * (protected by inner_lock of the proc that + * this ref belongs to) + */ + struct binder_work work; + binder_uintptr_t cookie; +}; + +/** + * struct binder_ref_data - binder_ref counts and id + * @debug_id: unique ID for the ref + * @desc: unique userspace handle for ref + * @strong: strong ref count (debugging only if not locked) + * @weak: weak ref count (debugging only if not locked) + * + * Structure to hold ref count and ref id information. Since + * the actual ref can only be accessed with a lock, this structure + * is used to return information about the ref to callers of + * ref inc/dec functions. + */ +struct binder_ref_data { + int debug_id; + uint32_t desc; + int strong; + int weak; +}; + +/** + * struct binder_ref - struct to track references on nodes + * @data: binder_ref_data containing id, handle, and current refcounts + * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree + * @rb_node_node: node for lookup by @node in proc's rb_tree + * @node_entry: list entry for node->refs list in target node + * (protected by @node->lock) + * @proc: binder_proc containing ref + * @node: binder_node of target node. When cleaning up a + * ref for deletion in binder_cleanup_ref, a non-NULL + * @node indicates the node must be freed + * @death: pointer to death notification (ref_death) if requested + * (protected by @node->lock) + * + * Structure to track references from procA to target node (on procB). This + * structure is unsafe to access without holding @proc->outer_lock. 
+ */ +struct binder_ref { + /* Lookups needed: */ + /* node + proc => ref (transaction) */ + /* desc + proc => ref (transaction, inc/dec ref) */ + /* node => refs + procs (proc exit) */ + struct binder_ref_data data; + struct rb_node rb_node_desc; + struct rb_node rb_node_node; + struct hlist_node node_entry; + struct binder_proc *proc; + struct binder_node *node; + struct binder_ref_death *death; +}; + +enum binder_deferred_state { + BINDER_DEFERRED_PUT_FILES = 0x01, + BINDER_DEFERRED_FLUSH = 0x02, + BINDER_DEFERRED_RELEASE = 0x04, +}; + +/** + * struct binder_proc - binder process bookkeeping + * @proc_node: element for binder_procs list + * @threads: rbtree of binder_threads in this proc + * (protected by @inner_lock) + * @nodes: rbtree of binder nodes associated with + * this proc ordered by node->ptr + * (protected by @inner_lock) + * @refs_by_desc: rbtree of refs ordered by ref->desc + * (protected by @outer_lock) + * @refs_by_node: rbtree of refs ordered by ref->node + * (protected by @outer_lock) + * @waiting_threads: threads currently waiting for proc work + * (protected by @inner_lock) + * @pid PID of group_leader of process + * (invariant after initialized) + * @tsk task_struct for group_leader of process + * (invariant after initialized) + * @files files_struct for process + * (protected by @files_lock) + * @files_lock mutex to protect @files + * @deferred_work_node: element for binder_deferred_list + * (protected by binder_deferred_lock) + * @deferred_work: bitmap of deferred work to perform + * (protected by binder_deferred_lock) + * @is_dead: process is dead and awaiting free + * when outstanding transactions are cleaned up + * (protected by @inner_lock) + * @todo: list of work for this process + * (protected by @inner_lock) + * @wait: wait queue head to wait for proc work + * (invariant after initialized) + * @stats: per-process binder statistics + * (atomics, no lock needed) + * @delivered_death: list of delivered death notification + * (protected by @inner_lock) + * @max_threads: cap on number of binder threads + * (protected by @inner_lock) + * @requested_threads: number of binder threads requested but not + * yet started. In current implementation, can + * only be 0 or 1. 
+ * (protected by @inner_lock) + * @requested_threads_started: number binder threads started + * (protected by @inner_lock) + * @tmp_ref: temporary reference to indicate proc is in use + * (protected by @inner_lock) + * @default_priority: default scheduler priority + * (invariant after initialized) + * @debugfs_entry: debugfs node + * @alloc: binder allocator bookkeeping + * @context: binder_context for this proc + * (invariant after initialized) + * @inner_lock: can nest under outer_lock and/or node lock + * @outer_lock: no nesting under innor or node lock + * Lock order: 1) outer, 2) node, 3) inner + * + * Bookkeeping structure for binder processes + */ +struct binder_proc { + struct hlist_node proc_node; + struct rb_root threads; + struct rb_root nodes; + struct rb_root refs_by_desc; + struct rb_root refs_by_node; + struct list_head waiting_threads; + int pid; + struct task_struct *tsk; + struct files_struct *files; + struct mutex files_lock; + struct hlist_node deferred_work_node; + int deferred_work; + bool is_dead; + + struct list_head todo; + wait_queue_head_t wait; + struct binder_stats stats; + struct list_head delivered_death; + int max_threads; + int requested_threads; + int requested_threads_started; + int tmp_ref; + long default_priority; + struct dentry *debugfs_entry; + struct binder_alloc alloc; + struct binder_context *context; + spinlock_t inner_lock; + spinlock_t outer_lock; +}; + +enum { + BINDER_LOOPER_STATE_REGISTERED = 0x01, + BINDER_LOOPER_STATE_ENTERED = 0x02, + BINDER_LOOPER_STATE_EXITED = 0x04, + BINDER_LOOPER_STATE_INVALID = 0x08, + BINDER_LOOPER_STATE_WAITING = 0x10, + BINDER_LOOPER_STATE_POLL = 0x20, +}; + +/** + * struct binder_thread - binder thread bookkeeping + * @proc: binder process for this thread + * (invariant after initialization) + * @rb_node: element for proc->threads rbtree + * (protected by @proc->inner_lock) + * @waiting_thread_node: element for @proc->waiting_threads list + * (protected by @proc->inner_lock) + * @pid: PID for this thread + * (invariant after initialization) + * @looper: bitmap of looping state + * (only accessed by this thread) + * @looper_needs_return: looping thread needs to exit driver + * (no lock needed) + * @transaction_stack: stack of in-progress transactions for this thread + * (protected by @proc->inner_lock) + * @todo: list of work to do for this thread + * (protected by @proc->inner_lock) + * @return_error: transaction errors reported by this thread + * (only accessed by this thread) + * @reply_error: transaction errors reported by target thread + * (protected by @proc->inner_lock) + * @wait: wait queue for thread work + * @stats: per-thread statistics + * (atomics, no lock needed) + * @tmp_ref: temporary reference to indicate thread is in use + * (atomic since @proc->inner_lock cannot + * always be acquired) + * @is_dead: thread is dead and awaiting free + * when outstanding transactions are cleaned up + * (protected by @proc->inner_lock) + * + * Bookkeeping structure for binder threads. 
+ */ +struct binder_thread { + struct binder_proc *proc; + struct rb_node rb_node; + struct list_head waiting_thread_node; + int pid; + int looper; /* only modified by this thread */ + bool looper_need_return; /* can be written by other thread */ + struct binder_transaction *transaction_stack; + struct list_head todo; + struct binder_error return_error; + struct binder_error reply_error; + wait_queue_head_t wait; + struct binder_stats stats; + atomic_t tmp_ref; + bool is_dead; +}; + +struct binder_transaction { + int debug_id; + struct binder_work work; + struct binder_thread *from; + struct binder_transaction *from_parent; + struct binder_proc *to_proc; + struct binder_thread *to_thread; + struct binder_transaction *to_parent; + unsigned need_reply:1; + /* unsigned is_dead:1; */ /* not used at the moment */ + + struct binder_buffer *buffer; + unsigned int code; + unsigned int flags; + long priority; + long saved_priority; + kuid_t sender_euid; + /** + * @lock: protects @from, @to_proc, and @to_thread + * + * @from, @to_proc, and @to_thread can be set to NULL + * during thread teardown + */ + spinlock_t lock; +}; + +/** + * binder_proc_lock() - Acquire outer lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Acquires proc->outer_lock. Used to protect binder_ref + * structures associated with the given proc. + */ +#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) +static void +_binder_proc_lock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&proc->outer_lock); +} + +/** + * binder_proc_unlock() - Release spinlock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Release lock acquired via binder_proc_lock() + */ +#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) +static void +_binder_proc_unlock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&proc->outer_lock); +} + +/** + * binder_inner_proc_lock() - Acquire inner lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Acquires proc->inner_lock. Used to protect todo lists + */ +#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) +static void +_binder_inner_proc_lock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&proc->inner_lock); +} + +/** + * binder_inner_proc_unlock() - Release inner lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Release lock acquired via binder_inner_proc_lock() + */ +#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) +static void +_binder_inner_proc_unlock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&proc->inner_lock); +} + +/** + * binder_node_lock() - Acquire spinlock for given binder_node + * @node: struct binder_node to acquire + * + * Acquires node->lock. 
Used to protect binder_node fields + */ +#define binder_node_lock(node) _binder_node_lock(node, __LINE__) +static void +_binder_node_lock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&node->lock); +} + +/** + * binder_node_unlock() - Release spinlock for given binder_proc + * @node: struct binder_node to acquire + * + * Release lock acquired via binder_node_lock() + */ +#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) +static void +_binder_node_unlock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&node->lock); +} + +/** + * binder_node_inner_lock() - Acquire node and inner locks + * @node: struct binder_node to acquire + * + * Acquires node->lock. If node->proc also acquires + * proc->inner_lock. Used to protect binder_node fields + */ +#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) +static void +_binder_node_inner_lock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&node->lock); + if (node->proc) + binder_inner_proc_lock(node->proc); +} + +/** + * binder_node_unlock() - Release node and inner locks + * @node: struct binder_node to acquire + * + * Release lock acquired via binder_node_lock() + */ +#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) +static void +_binder_node_inner_unlock(struct binder_node *node, int line) +{ + struct binder_proc *proc = node->proc; + + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + if (proc) + binder_inner_proc_unlock(proc); + spin_unlock(&node->lock); +} + +static bool binder_worklist_empty_ilocked(struct list_head *list) +{ + return list_empty(list); +} + +/** + * binder_worklist_empty() - Check if no items on the work list + * @proc: binder_proc associated with list + * @list: list to check + * + * Return: true if there are no items on list, else false + */ +static bool binder_worklist_empty(struct binder_proc *proc, + struct list_head *list) +{ + bool ret; + + binder_inner_proc_lock(proc); + ret = binder_worklist_empty_ilocked(list); + binder_inner_proc_unlock(proc); + return ret; +} + +static void +binder_enqueue_work_ilocked(struct binder_work *work, + struct list_head *target_list) +{ + BUG_ON(target_list == NULL); + BUG_ON(work->entry.next && !list_empty(&work->entry)); + list_add_tail(&work->entry, target_list); +} + +/** + * binder_enqueue_work() - Add an item to the work list + * @proc: binder_proc associated with list + * @work: struct binder_work to add to list + * @target_list: list to add work to + * + * Adds the work to the specified list. Asserts that work + * is not already on a list. + */ +static void +binder_enqueue_work(struct binder_proc *proc, + struct binder_work *work, + struct list_head *target_list) +{ + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked(work, target_list); + binder_inner_proc_unlock(proc); +} + +static void +binder_dequeue_work_ilocked(struct binder_work *work) +{ + list_del_init(&work->entry); +} + +/** + * binder_dequeue_work() - Removes an item from the work list + * @proc: binder_proc associated with list + * @work: struct binder_work to remove from list + * + * Removes the specified work item from whatever list it is on. + * Can safely be called if work is not on any list. 
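+ * This is safe because binder_dequeue_work_ilocked() uses list_del_init(),
+ * which leaves a dequeued entry pointing at itself, so dequeuing it a
+ * second time is a harmless no-op.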
+ */ +static void +binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) +{ + binder_inner_proc_lock(proc); + binder_dequeue_work_ilocked(work); + binder_inner_proc_unlock(proc); +} + +static struct binder_work *binder_dequeue_work_head_ilocked( + struct list_head *list) +{ + struct binder_work *w; + + w = list_first_entry_or_null(list, struct binder_work, entry); + if (w) + list_del_init(&w->entry); + return w; +} + +/** + * binder_dequeue_work_head() - Dequeues the item at head of list + * @proc: binder_proc associated with list + * @list: list to dequeue head + * + * Removes the head of the list if there are items on the list + * + * Return: pointer dequeued binder_work, NULL if list was empty + */ +static struct binder_work *binder_dequeue_work_head( + struct binder_proc *proc, + struct list_head *list) +{ + struct binder_work *w; + + binder_inner_proc_lock(proc); + w = binder_dequeue_work_head_ilocked(list); + binder_inner_proc_unlock(proc); + return w; +} + +static void +binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); +static void binder_free_thread(struct binder_thread *thread); +static void binder_free_proc(struct binder_proc *proc); +static void binder_inc_node_tmpref_ilocked(struct binder_node *node); + +static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) +{ + unsigned long rlim_cur; + unsigned long irqs; + int ret; + + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + ret = -ESRCH; + goto err; + } + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } + rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); + unlock_task_sighand(proc->tsk, &irqs); + + ret = __alloc_fd(proc->files, 0, rlim_cur, flags); +err: + mutex_unlock(&proc->files_lock); + return ret; +} + +/* + * copied from fd_install + */ +static void task_fd_install( + struct binder_proc *proc, unsigned int fd, struct file *file) +{ + mutex_lock(&proc->files_lock); + if (proc->files) + __fd_install(proc->files, fd, file); + mutex_unlock(&proc->files_lock); +} + +/* + * copied from sys_close + */ +static long task_close_fd(struct binder_proc *proc, unsigned int fd) +{ + int retval; + + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + retval = -ESRCH; + goto err; + } + retval = __close_fd(proc->files, fd); + /* can't restart close syscall because file table entry was cleared */ + if (unlikely(retval == -ERESTARTSYS || + retval == -ERESTARTNOINTR || + retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK)) + retval = -EINTR; +err: + mutex_unlock(&proc->files_lock); + return retval; +} + +static bool binder_has_work_ilocked(struct binder_thread *thread, + bool do_proc_work) +{ + return !binder_worklist_empty_ilocked(&thread->todo) || + thread->looper_need_return || + (do_proc_work && + !binder_worklist_empty_ilocked(&thread->proc->todo)); +} + +static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) +{ + bool has_work; + + binder_inner_proc_lock(thread->proc); + has_work = binder_has_work_ilocked(thread, do_proc_work); + binder_inner_proc_unlock(thread->proc); + + return has_work; +} + +static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) +{ + return !thread->transaction_stack && + binder_worklist_empty_ilocked(&thread->todo) && + (thread->looper & (BINDER_LOOPER_STATE_ENTERED | + BINDER_LOOPER_STATE_REGISTERED)); +} + +static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, + bool sync) +{ + struct rb_node *n; + struct binder_thread 
*thread; + + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { + thread = rb_entry(n, struct binder_thread, rb_node); + if (thread->looper & BINDER_LOOPER_STATE_POLL && + binder_available_for_proc_work_ilocked(thread)) { + if (sync) + wake_up_interruptible_sync(&thread->wait); + else + wake_up_interruptible(&thread->wait); + } + } +} + +/** + * binder_select_thread_ilocked() - selects a thread for doing proc work. + * @proc: process to select a thread from + * + * Note that calling this function moves the thread off the waiting_threads + * list, so it can only be woken up by the caller of this function, or a + * signal. Therefore, callers *should* always wake up the thread this function + * returns. + * + * Return: If there's a thread currently waiting for process work, + * returns that thread. Otherwise returns NULL. + */ +static struct binder_thread * +binder_select_thread_ilocked(struct binder_proc *proc) +{ + struct binder_thread *thread; + + assert_spin_locked(&proc->inner_lock); + thread = list_first_entry_or_null(&proc->waiting_threads, + struct binder_thread, + waiting_thread_node); + + if (thread) + list_del_init(&thread->waiting_thread_node); + + return thread; +} + +/** + * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. + * @proc: process to wake up a thread in + * @thread: specific thread to wake-up (may be NULL) + * @sync: whether to do a synchronous wake-up + * + * This function wakes up a thread in the @proc process. + * The caller may provide a specific thread to wake-up in + * the @thread parameter. If @thread is NULL, this function + * will wake up threads that have called poll(). + * + * Note that for this function to work as expected, callers + * should first call binder_select_thread() to find a thread + * to handle the work (if they don't have a thread already), + * and pass the result into the @thread parameter. + */ +static void binder_wakeup_thread_ilocked(struct binder_proc *proc, + struct binder_thread *thread, + bool sync) +{ + assert_spin_locked(&proc->inner_lock); + + if (thread) { + if (sync) + wake_up_interruptible_sync(&thread->wait); + else + wake_up_interruptible(&thread->wait); + return; + } + + /* Didn't find a thread waiting for proc work; this can happen + * in two scenarios: + * 1. All threads are busy handling transactions + * In that case, one of those threads should call back into + * the kernel driver soon and pick up this work. + * 2. Threads are using the (e)poll interface, in which case + * they may be blocked on the waitqueue without having been + * added to waiting_threads. For this case, we just iterate + * over all threads not handling transaction work, and + * wake them all up. We wake all because we don't know whether + * a thread that called into (e)poll is handling non-binder + * work currently. 
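+	 * Waking them all is a conservative fallback: a thread that wakes
+	 * and finds nothing to do is expected to go back to waiting.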
+ */ + binder_wakeup_poll_threads_ilocked(proc, sync); +} + +static void binder_wakeup_proc_ilocked(struct binder_proc *proc) +{ + struct binder_thread *thread = binder_select_thread_ilocked(proc); + + binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); +} + +static void binder_set_nice(long nice) +{ + long min_nice; + + if (can_nice(current, nice)) { + set_user_nice(current, nice); + return; + } + min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "%d: nice value %ld not allowed use %ld instead\n", + current->pid, nice, min_nice); + set_user_nice(current, min_nice); + if (min_nice <= MAX_NICE) + return; + binder_user_error("%d RLIMIT_NICE not set\n", current->pid); +} + +static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, + binder_uintptr_t ptr) +{ + struct rb_node *n = proc->nodes.rb_node; + struct binder_node *node; + + assert_spin_locked(&proc->inner_lock); + + while (n) { + node = rb_entry(n, struct binder_node, rb_node); + + if (ptr < node->ptr) + n = n->rb_left; + else if (ptr > node->ptr) + n = n->rb_right; + else { + /* + * take an implicit weak reference + * to ensure node stays alive until + * call to binder_put_node() + */ + binder_inc_node_tmpref_ilocked(node); + return node; + } + } + return NULL; +} + +static struct binder_node *binder_get_node(struct binder_proc *proc, + binder_uintptr_t ptr) +{ + struct binder_node *node; + + binder_inner_proc_lock(proc); + node = binder_get_node_ilocked(proc, ptr); + binder_inner_proc_unlock(proc); + return node; +} + +static struct binder_node *binder_init_node_ilocked( + struct binder_proc *proc, + struct binder_node *new_node, + struct flat_binder_object *fp) +{ + struct rb_node **p = &proc->nodes.rb_node; + struct rb_node *parent = NULL; + struct binder_node *node; + binder_uintptr_t ptr = fp ? fp->binder : 0; + binder_uintptr_t cookie = fp ? fp->cookie : 0; + __u32 flags = fp ? fp->flags : 0; + + assert_spin_locked(&proc->inner_lock); + + while (*p) { + + parent = *p; + node = rb_entry(parent, struct binder_node, rb_node); + + if (ptr < node->ptr) + p = &(*p)->rb_left; + else if (ptr > node->ptr) + p = &(*p)->rb_right; + else { + /* + * A matching node is already in + * the rb tree. Abandon the init + * and return it. 
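+			 * The caller (binder_new_node()) detects this by
+			 * comparing the returned node with its new allocation
+			 * and frees the unused one.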
+ */ + binder_inc_node_tmpref_ilocked(node); + return node; + } + } + node = new_node; + binder_stats_created(BINDER_STAT_NODE); + node->tmp_refs++; + rb_link_node(&node->rb_node, parent, p); + rb_insert_color(&node->rb_node, &proc->nodes); + node->debug_id = atomic_inc_return(&binder_last_id); + node->proc = proc; + node->ptr = ptr; + node->cookie = cookie; + node->work.type = BINDER_WORK_NODE; + node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + spin_lock_init(&node->lock); + INIT_LIST_HEAD(&node->work.entry); + INIT_LIST_HEAD(&node->async_todo); + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx created\n", + proc->pid, current->pid, node->debug_id, + (u64)node->ptr, (u64)node->cookie); + + return node; +} + +static struct binder_node *binder_new_node(struct binder_proc *proc, + struct flat_binder_object *fp) +{ + struct binder_node *node; + struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (!new_node) + return NULL; + binder_inner_proc_lock(proc); + node = binder_init_node_ilocked(proc, new_node, fp); + binder_inner_proc_unlock(proc); + if (node != new_node) + /* + * The node was already added by another thread + */ + kfree(new_node); + + return node; +} + +static void binder_free_node(struct binder_node *node) +{ + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); +} + +static int binder_inc_node_nilocked(struct binder_node *node, int strong, + int internal, + struct list_head *target_list) +{ + struct binder_proc *proc = node->proc; + + assert_spin_locked(&node->lock); + if (proc) + assert_spin_locked(&proc->inner_lock); + if (strong) { + if (internal) { + if (target_list == NULL && + node->internal_strong_refs == 0 && + !(node->proc && + node == node->proc->context->binder_context_mgr_node && + node->has_strong_ref)) { + pr_err("invalid inc strong node for %d\n", + node->debug_id); + return -EINVAL; + } + node->internal_strong_refs++; + } else + node->local_strong_refs++; + if (!node->has_strong_ref && target_list) { + binder_dequeue_work_ilocked(&node->work); + binder_enqueue_work_ilocked(&node->work, target_list); + } + } else { + if (!internal) + node->local_weak_refs++; + if (!node->has_weak_ref && list_empty(&node->work.entry)) { + if (target_list == NULL) { + pr_err("invalid inc weak node for %d\n", + node->debug_id); + return -EINVAL; + } + binder_enqueue_work_ilocked(&node->work, target_list); + } + } + return 0; +} + +static int binder_inc_node(struct binder_node *node, int strong, int internal, + struct list_head *target_list) +{ + int ret; + + binder_node_inner_lock(node); + ret = binder_inc_node_nilocked(node, strong, internal, target_list); + binder_node_inner_unlock(node); + + return ret; +} + +static bool binder_dec_node_nilocked(struct binder_node *node, + int strong, int internal) +{ + struct binder_proc *proc = node->proc; + + assert_spin_locked(&node->lock); + if (proc) + assert_spin_locked(&proc->inner_lock); + if (strong) { + if (internal) + node->internal_strong_refs--; + else + node->local_strong_refs--; + if (node->local_strong_refs || node->internal_strong_refs) + return false; + } else { + if (!internal) + node->local_weak_refs--; + if (node->local_weak_refs || node->tmp_refs || + !hlist_empty(&node->refs)) + return false; + } + + if (proc && (node->has_strong_ref || node->has_weak_ref)) { + if (list_empty(&node->work.entry)) { + binder_enqueue_work_ilocked(&node->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + } 
else { + if (hlist_empty(&node->refs) && !node->local_strong_refs && + !node->local_weak_refs && !node->tmp_refs) { + if (proc) { + binder_dequeue_work_ilocked(&node->work); + rb_erase(&node->rb_node, &proc->nodes); + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "refless node %d deleted\n", + node->debug_id); + } else { + BUG_ON(!list_empty(&node->work.entry)); + spin_lock(&binder_dead_nodes_lock); + /* + * tmp_refs could have changed so + * check it again + */ + if (node->tmp_refs) { + spin_unlock(&binder_dead_nodes_lock); + return false; + } + hlist_del(&node->dead_node); + spin_unlock(&binder_dead_nodes_lock); + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "dead node %d deleted\n", + node->debug_id); + } + return true; + } + } + return false; +} + +static void binder_dec_node(struct binder_node *node, int strong, int internal) +{ + bool free_node; + + binder_node_inner_lock(node); + free_node = binder_dec_node_nilocked(node, strong, internal); + binder_node_inner_unlock(node); + if (free_node) + binder_free_node(node); +} + +static void binder_inc_node_tmpref_ilocked(struct binder_node *node) +{ + /* + * No call to binder_inc_node() is needed since we + * don't need to inform userspace of any changes to + * tmp_refs + */ + node->tmp_refs++; +} + +/** + * binder_inc_node_tmpref() - take a temporary reference on node + * @node: node to reference + * + * Take reference on node to prevent the node from being freed + * while referenced only by a local variable. The inner lock is + * needed to serialize with the node work on the queue (which + * isn't needed after the node is dead). If the node is dead + * (node->proc is NULL), use binder_dead_nodes_lock to protect + * node->tmp_refs against dead-node-only cases where the node + * lock cannot be acquired (eg traversing the dead node list to + * print nodes) + */ +static void binder_inc_node_tmpref(struct binder_node *node) +{ + binder_node_lock(node); + if (node->proc) + binder_inner_proc_lock(node->proc); + else + spin_lock(&binder_dead_nodes_lock); + binder_inc_node_tmpref_ilocked(node); + if (node->proc) + binder_inner_proc_unlock(node->proc); + else + spin_unlock(&binder_dead_nodes_lock); + binder_node_unlock(node); +} + +/** + * binder_dec_node_tmpref() - remove a temporary reference on node + * @node: node to reference + * + * Release temporary reference on node taken via binder_inc_node_tmpref() + */ +static void binder_dec_node_tmpref(struct binder_node *node) +{ + bool free_node; + + binder_node_inner_lock(node); + if (!node->proc) + spin_lock(&binder_dead_nodes_lock); + node->tmp_refs--; + BUG_ON(node->tmp_refs < 0); + if (!node->proc) + spin_unlock(&binder_dead_nodes_lock); + /* + * Call binder_dec_node() to check if all refcounts are 0 + * and cleanup is needed. Calling with strong=0 and internal=1 + * causes no actual reference to be released in binder_dec_node(). + * If that changes, a change is needed here too. 
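+	 * The _nilocked variant is called directly here because the node
+	 * (and inner) lock is still held from the decrement above.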
+ */ + free_node = binder_dec_node_nilocked(node, 0, 1); + binder_node_inner_unlock(node); + if (free_node) + binder_free_node(node); +} + +static void binder_put_node(struct binder_node *node) +{ + binder_dec_node_tmpref(node); +} + +static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, + u32 desc, bool need_strong_ref) +{ + struct rb_node *n = proc->refs_by_desc.rb_node; + struct binder_ref *ref; + + while (n) { + ref = rb_entry(n, struct binder_ref, rb_node_desc); + + if (desc < ref->data.desc) { + n = n->rb_left; + } else if (desc > ref->data.desc) { + n = n->rb_right; + } else if (need_strong_ref && !ref->data.strong) { + binder_user_error("tried to use weak ref as strong ref\n"); + return NULL; + } else { + return ref; + } + } + return NULL; +} + +/** + * binder_get_ref_for_node_olocked() - get the ref associated with given node + * @proc: binder_proc that owns the ref + * @node: binder_node of target + * @new_ref: newly allocated binder_ref to be initialized or %NULL + * + * Look up the ref for the given node and return it if it exists + * + * If it doesn't exist and the caller provides a newly allocated + * ref, initialize the fields of the newly allocated ref and insert + * into the given proc rb_trees and node refs list. + * + * Return: the ref for node. It is possible that another thread + * allocated/initialized the ref first in which case the + * returned ref would be different than the passed-in + * new_ref. new_ref must be kfree'd by the caller in + * this case. + */ +static struct binder_ref *binder_get_ref_for_node_olocked( + struct binder_proc *proc, + struct binder_node *node, + struct binder_ref *new_ref) +{ + struct binder_context *context = proc->context; + struct rb_node **p = &proc->refs_by_node.rb_node; + struct rb_node *parent = NULL; + struct binder_ref *ref; + struct rb_node *n; + + while (*p) { + parent = *p; + ref = rb_entry(parent, struct binder_ref, rb_node_node); + + if (node < ref->node) + p = &(*p)->rb_left; + else if (node > ref->node) + p = &(*p)->rb_right; + else + return ref; + } + if (!new_ref) + return NULL; + + binder_stats_created(BINDER_STAT_REF); + new_ref->data.debug_id = atomic_inc_return(&binder_last_id); + new_ref->proc = proc; + new_ref->node = node; + rb_link_node(&new_ref->rb_node_node, parent, p); + rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); + + new_ref->data.desc = (node == context->binder_context_mgr_node) ? 
0 : 1; + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { + ref = rb_entry(n, struct binder_ref, rb_node_desc); + if (ref->data.desc > new_ref->data.desc) + break; + new_ref->data.desc = ref->data.desc + 1; + } + + p = &proc->refs_by_desc.rb_node; + while (*p) { + parent = *p; + ref = rb_entry(parent, struct binder_ref, rb_node_desc); + + if (new_ref->data.desc < ref->data.desc) + p = &(*p)->rb_left; + else if (new_ref->data.desc > ref->data.desc) + p = &(*p)->rb_right; + else + BUG(); + } + rb_link_node(&new_ref->rb_node_desc, parent, p); + rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); + + binder_node_lock(node); + hlist_add_head(&new_ref->node_entry, &node->refs); + + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d new ref %d desc %d for node %d\n", + proc->pid, new_ref->data.debug_id, new_ref->data.desc, + node->debug_id); + binder_node_unlock(node); + return new_ref; +} + +static void binder_cleanup_ref_olocked(struct binder_ref *ref) +{ + bool delete_node = false; + + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d delete ref %d desc %d for node %d\n", + ref->proc->pid, ref->data.debug_id, ref->data.desc, + ref->node->debug_id); + + rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); + rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); + + binder_node_inner_lock(ref->node); + if (ref->data.strong) + binder_dec_node_nilocked(ref->node, 1, 1); + + hlist_del(&ref->node_entry); + delete_node = binder_dec_node_nilocked(ref->node, 0, 1); + binder_node_inner_unlock(ref->node); + /* + * Clear ref->node unless we want the caller to free the node + */ + if (!delete_node) { + /* + * The caller uses ref->node to determine + * whether the node needs to be freed. Clear + * it since the node is still alive. + */ + ref->node = NULL; + } + + if (ref->death) { + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "%d delete ref %d desc %d has death notification\n", + ref->proc->pid, ref->data.debug_id, + ref->data.desc); + binder_dequeue_work(ref->proc, &ref->death->work); + binder_stats_deleted(BINDER_STAT_DEATH); + } + binder_stats_deleted(BINDER_STAT_REF); +} + +/** + * binder_inc_ref_olocked() - increment the ref for given handle + * @ref: ref to be incremented + * @strong: if true, strong increment, else weak + * @target_list: list to queue node work on + * + * Increment the ref. @ref->proc->outer_lock must be held on entry + * + * Return: 0, if successful, else errno + */ +static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, + struct list_head *target_list) +{ + int ret; + + if (strong) { + if (ref->data.strong == 0) { + ret = binder_inc_node(ref->node, 1, 1, target_list); + if (ret) + return ret; + } + ref->data.strong++; + } else { + if (ref->data.weak == 0) { + ret = binder_inc_node(ref->node, 0, 1, target_list); + if (ret) + return ret; + } + ref->data.weak++; + } + return 0; +} + +/** + * binder_dec_ref() - dec the ref for given handle + * @ref: ref to be decremented + * @strong: if true, strong decrement, else weak + * + * Decrement the ref. 
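+ * As with binder_inc_ref_olocked(), @ref->proc->outer_lock must be held
+ * on entry (hence the _olocked suffix).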
+ * + * Return: true if ref is cleaned up and ready to be freed + */ +static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) +{ + if (strong) { + if (ref->data.strong == 0) { + binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", + ref->proc->pid, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak); + return false; + } + ref->data.strong--; + if (ref->data.strong == 0) + binder_dec_node(ref->node, strong, 1); + } else { + if (ref->data.weak == 0) { + binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", + ref->proc->pid, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak); + return false; + } + ref->data.weak--; + } + if (ref->data.strong == 0 && ref->data.weak == 0) { + binder_cleanup_ref_olocked(ref); + return true; + } + return false; +} + +/** + * binder_get_node_from_ref() - get the node from the given proc/desc + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @need_strong_ref: if true, only return node if ref is strong + * @rdata: the id/refcount data for the ref + * + * Given a proc and ref handle, return the associated binder_node + * + * Return: a binder_node or NULL if not found or not strong when strong required + */ +static struct binder_node *binder_get_node_from_ref( + struct binder_proc *proc, + u32 desc, bool need_strong_ref, + struct binder_ref_data *rdata) +{ + struct binder_node *node; + struct binder_ref *ref; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, desc, need_strong_ref); + if (!ref) + goto err_no_ref; + node = ref->node; + /* + * Take an implicit reference on the node to ensure + * it stays alive until the call to binder_put_node() + */ + binder_inc_node_tmpref(node); + if (rdata) + *rdata = ref->data; + binder_proc_unlock(proc); + + return node; + +err_no_ref: + binder_proc_unlock(proc); + return NULL; +} + +/** + * binder_free_ref() - free the binder_ref + * @ref: ref to free + * + * Free the binder_ref. Free the binder_node indicated by ref->node + * (if non-NULL) and the binder_ref_death indicated by ref->death. + */ +static void binder_free_ref(struct binder_ref *ref) +{ + if (ref->node) + binder_free_node(ref->node); + kfree(ref->death); + kfree(ref); +} + +/** + * binder_update_ref_for_handle() - inc/dec the ref for given handle + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @increment: true=inc reference, false=dec reference + * @strong: true=strong reference, false=weak reference + * @rdata: the id/refcount data for the ref + * + * Given a proc and ref handle, increment or decrement the ref + * according to "increment" arg. 
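+ * If a decrement drops both the strong and weak counts to zero, the ref
+ * is cleaned up and freed before returning.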
+ * + * Return: 0 if successful, else errno + */ +static int binder_update_ref_for_handle(struct binder_proc *proc, + uint32_t desc, bool increment, bool strong, + struct binder_ref_data *rdata) +{ + int ret = 0; + struct binder_ref *ref; + bool delete_ref = false; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, desc, strong); + if (!ref) { + ret = -EINVAL; + goto err_no_ref; + } + if (increment) + ret = binder_inc_ref_olocked(ref, strong, NULL); + else + delete_ref = binder_dec_ref_olocked(ref, strong); + + if (rdata) + *rdata = ref->data; + binder_proc_unlock(proc); + + if (delete_ref) + binder_free_ref(ref); + return ret; + +err_no_ref: + binder_proc_unlock(proc); + return ret; +} + +/** + * binder_dec_ref_for_handle() - dec the ref for given handle + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @strong: true=strong reference, false=weak reference + * @rdata: the id/refcount data for the ref + * + * Just calls binder_update_ref_for_handle() to decrement the ref. + * + * Return: 0 if successful, else errno + */ +static int binder_dec_ref_for_handle(struct binder_proc *proc, + uint32_t desc, bool strong, struct binder_ref_data *rdata) +{ + return binder_update_ref_for_handle(proc, desc, false, strong, rdata); +} + + +/** + * binder_inc_ref_for_node() - increment the ref for given proc/node + * @proc: proc containing the ref + * @node: target node + * @strong: true=strong reference, false=weak reference + * @target_list: worklist to use if node is incremented + * @rdata: the id/refcount data for the ref + * + * Given a proc and node, increment the ref. Create the ref if it + * doesn't already exist + * + * Return: 0 if successful, else errno + */ +static int binder_inc_ref_for_node(struct binder_proc *proc, + struct binder_node *node, + bool strong, + struct list_head *target_list, + struct binder_ref_data *rdata) +{ + struct binder_ref *ref; + struct binder_ref *new_ref = NULL; + int ret = 0; + + binder_proc_lock(proc); + ref = binder_get_ref_for_node_olocked(proc, node, NULL); + if (!ref) { + binder_proc_unlock(proc); + new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!new_ref) + return -ENOMEM; + binder_proc_lock(proc); + ref = binder_get_ref_for_node_olocked(proc, node, new_ref); + } + ret = binder_inc_ref_olocked(ref, strong, target_list); + *rdata = ref->data; + binder_proc_unlock(proc); + if (new_ref && ref != new_ref) + /* + * Another thread created the ref first so + * free the one we allocated + */ + kfree(new_ref); + return ret; +} + +static void binder_pop_transaction_ilocked(struct binder_thread *target_thread, + struct binder_transaction *t) +{ + BUG_ON(!target_thread); + assert_spin_locked(&target_thread->proc->inner_lock); + BUG_ON(target_thread->transaction_stack != t); + BUG_ON(target_thread->transaction_stack->from != target_thread); + target_thread->transaction_stack = + target_thread->transaction_stack->from_parent; + t->from = NULL; +} + +/** + * binder_thread_dec_tmpref() - decrement thread->tmp_ref + * @thread: thread to decrement + * + * A thread needs to be kept alive while being used to create or + * handle a transaction. binder_get_txn_from() is used to safely + * extract t->from from a binder_transaction and keep the thread + * indicated by t->from from being freed. 
When done with that + * binder_thread, this function is called to decrement the + * tmp_ref and free if appropriate (thread has been released + * and no transaction being processed by the driver) + */ +static void binder_thread_dec_tmpref(struct binder_thread *thread) +{ + /* + * atomic is used to protect the counter value while + * it cannot reach zero or thread->is_dead is false + */ + binder_inner_proc_lock(thread->proc); + atomic_dec(&thread->tmp_ref); + if (thread->is_dead && !atomic_read(&thread->tmp_ref)) { + binder_inner_proc_unlock(thread->proc); + binder_free_thread(thread); + return; + } + binder_inner_proc_unlock(thread->proc); +} + +/** + * binder_proc_dec_tmpref() - decrement proc->tmp_ref + * @proc: proc to decrement + * + * A binder_proc needs to be kept alive while being used to create or + * handle a transaction. proc->tmp_ref is incremented when + * creating a new transaction or the binder_proc is currently in-use + * by threads that are being released. When done with the binder_proc, + * this function is called to decrement the counter and free the + * proc if appropriate (proc has been released, all threads have + * been released and not currenly in-use to process a transaction). + */ +static void binder_proc_dec_tmpref(struct binder_proc *proc) +{ + binder_inner_proc_lock(proc); + proc->tmp_ref--; + if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && + !proc->tmp_ref) { + binder_inner_proc_unlock(proc); + binder_free_proc(proc); + return; + } + binder_inner_proc_unlock(proc); +} + +/** + * binder_get_txn_from() - safely extract the "from" thread in transaction + * @t: binder transaction for t->from + * + * Atomically return the "from" thread and increment the tmp_ref + * count for the thread to ensure it stays alive until + * binder_thread_dec_tmpref() is called. + * + * Return: the value of t->from + */ +static struct binder_thread *binder_get_txn_from( + struct binder_transaction *t) +{ + struct binder_thread *from; + + spin_lock(&t->lock); + from = t->from; + if (from) + atomic_inc(&from->tmp_ref); + spin_unlock(&t->lock); + return from; +} + +/** + * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock + * @t: binder transaction for t->from + * + * Same as binder_get_txn_from() except it also acquires the proc->inner_lock + * to guarantee that the thread cannot be released while operating on it. + * The caller must call binder_inner_proc_unlock() to release the inner lock + * as well as call binder_dec_thread_txn() to release the reference. 
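+ * If t->from has been cleared concurrently, the temporary reference is
+ * dropped again and %NULL is returned.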
+ * + * Return: the value of t->from + */ +static struct binder_thread *binder_get_txn_from_and_acq_inner( + struct binder_transaction *t) +{ + struct binder_thread *from; + + from = binder_get_txn_from(t); + if (!from) + return NULL; + binder_inner_proc_lock(from->proc); + if (t->from) { + BUG_ON(from != t->from); + return from; + } + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); + return NULL; +} + +static void binder_free_transaction(struct binder_transaction *t) +{ + if (t->buffer) + t->buffer->transaction = NULL; + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); +} + +static void binder_send_failed_reply(struct binder_transaction *t, + uint32_t error_code) +{ + struct binder_thread *target_thread; + struct binder_transaction *next; + + BUG_ON(t->flags & TF_ONE_WAY); + while (1) { + target_thread = binder_get_txn_from_and_acq_inner(t); + if (target_thread) { + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "send failed reply for transaction %d to %d:%d\n", + t->debug_id, + target_thread->proc->pid, + target_thread->pid); + + binder_pop_transaction_ilocked(target_thread, t); + if (target_thread->reply_error.cmd == BR_OK) { + target_thread->reply_error.cmd = error_code; + binder_enqueue_work_ilocked( + &target_thread->reply_error.work, + &target_thread->todo); + wake_up_interruptible(&target_thread->wait); + } else { + WARN(1, "Unexpected reply error: %u\n", + target_thread->reply_error.cmd); + } + binder_inner_proc_unlock(target_thread->proc); + binder_thread_dec_tmpref(target_thread); + binder_free_transaction(t); + return; + } + next = t->from_parent; + + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "send failed reply for transaction %d, target dead\n", + t->debug_id); + + binder_free_transaction(t); + if (next == NULL) { + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "reply failed, no target thread at root\n"); + return; + } + t = next; + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "reply failed, no target thread -- retry %d\n", + t->debug_id); + } +} + +/** + * binder_cleanup_transaction() - cleans up undelivered transaction + * @t: transaction that needs to be cleaned up + * @reason: reason the transaction wasn't delivered + * @error_code: error to return to caller (if synchronous call) + */ +static void binder_cleanup_transaction(struct binder_transaction *t, + const char *reason, + uint32_t error_code) +{ + if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { + binder_send_failed_reply(t, error_code); + } else { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered transaction %d, %s\n", + t->debug_id, reason); + binder_free_transaction(t); + } +} + +/** + * binder_validate_object() - checks for a valid metadata object in a buffer. + * @buffer: binder_buffer that we're parsing. + * @offset: offset in the buffer at which to validate an object. + * + * Return: If there's a valid metadata object at @offset in @buffer, the + * size of that object. Otherwise, it returns zero. + */ +static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) +{ + /* Check if we can read a header first */ + struct binder_object_header *hdr; + size_t object_size = 0; + + if (offset > buffer->data_size - sizeof(*hdr) || + buffer->data_size < sizeof(*hdr) || + !IS_ALIGNED(offset, sizeof(u32))) + return 0; + + /* Ok, now see if we can read a complete object. 
*/ + hdr = (struct binder_object_header *)(buffer->data + offset); + switch (hdr->type) { + case BINDER_TYPE_BINDER: + case BINDER_TYPE_WEAK_BINDER: + case BINDER_TYPE_HANDLE: + case BINDER_TYPE_WEAK_HANDLE: + object_size = sizeof(struct flat_binder_object); + break; + case BINDER_TYPE_FD: + object_size = sizeof(struct binder_fd_object); + break; + case BINDER_TYPE_PTR: + object_size = sizeof(struct binder_buffer_object); + break; + case BINDER_TYPE_FDA: + object_size = sizeof(struct binder_fd_array_object); + break; + default: + return 0; + } + if (offset <= buffer->data_size - object_size && + buffer->data_size >= object_size) + return object_size; + else + return 0; +} + +/** + * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. + * @b: binder_buffer containing the object + * @index: index in offset array at which the binder_buffer_object is + * located + * @start: points to the start of the offset array + * @num_valid: the number of valid offsets in the offset array + * + * Return: If @index is within the valid range of the offset array + * described by @start and @num_valid, and if there's a valid + * binder_buffer_object at the offset found in index @index + * of the offset array, that object is returned. Otherwise, + * %NULL is returned. + * Note that the offset found in index @index itself is not + * verified; this function assumes that @num_valid elements + * from @start were previously verified to have valid offsets. + */ +static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, + binder_size_t index, + binder_size_t *start, + binder_size_t num_valid) +{ + struct binder_buffer_object *buffer_obj; + binder_size_t *offp; + + if (index >= num_valid) + return NULL; + + offp = start + index; + buffer_obj = (struct binder_buffer_object *)(b->data + *offp); + if (buffer_obj->hdr.type != BINDER_TYPE_PTR) + return NULL; + + return buffer_obj; +} + +/** + * binder_validate_fixup() - validates pointer/fd fixups happen in order. + * @b: transaction buffer + * @objects_start start of objects buffer + * @buffer: binder_buffer_object in which to fix up + * @offset: start offset in @buffer to fix up + * @last_obj: last binder_buffer_object that we fixed up in + * @last_min_offset: minimum fixup offset in @last_obj + * + * Return: %true if a fixup in buffer @buffer at offset @offset is + * allowed. + * + * For safety reasons, we only allow fixups inside a buffer to happen + * at increasing offsets; additionally, we only allow fixup on the last + * buffer object that was verified, or one of its parents. 
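+ * In the examples below each letter stands for a binder_buffer_object,
+ * listed in the order it appears in the offset array.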
+ * + * Example of what is allowed: + * + * A + * B (parent = A, offset = 0) + * C (parent = A, offset = 16) + * D (parent = C, offset = 0) + * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) + * + * Examples of what is not allowed: + * + * Decreasing offsets within the same parent: + * A + * C (parent = A, offset = 16) + * B (parent = A, offset = 0) // decreasing offset within A + * + * Referring to a parent that wasn't the last object or any of its parents: + * A + * B (parent = A, offset = 0) + * C (parent = A, offset = 0) + * C (parent = A, offset = 16) + * D (parent = B, offset = 0) // B is not A or any of A's parents + */ +static bool binder_validate_fixup(struct binder_buffer *b, + binder_size_t *objects_start, + struct binder_buffer_object *buffer, + binder_size_t fixup_offset, + struct binder_buffer_object *last_obj, + binder_size_t last_min_offset) +{ + if (!last_obj) { + /* Nothing to fix up in */ + return false; + } + + while (last_obj != buffer) { + /* + * Safe to retrieve the parent of last_obj, since it + * was already previously verified by the driver. + */ + if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) + return false; + last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); + last_obj = (struct binder_buffer_object *) + (b->data + *(objects_start + last_obj->parent)); + } + return (fixup_offset >= last_min_offset); +} + +static void binder_transaction_buffer_release(struct binder_proc *proc, + struct binder_buffer *buffer, + binder_size_t *failed_at) +{ + binder_size_t *offp, *off_start, *off_end; + int debug_id = buffer->debug_id; + + binder_debug(BINDER_DEBUG_TRANSACTION, + "%d buffer release %d, size %zd-%zd, failed at %p\n", + proc->pid, buffer->debug_id, + buffer->data_size, buffer->offsets_size, failed_at); + + if (buffer->target_node) + binder_dec_node(buffer->target_node, 1, 0); + + off_start = (binder_size_t *)(buffer->data + + ALIGN(buffer->data_size, sizeof(void *))); + if (failed_at) + off_end = failed_at; + else + off_end = (void *)off_start + buffer->offsets_size; + for (offp = off_start; offp < off_end; offp++) { + struct binder_object_header *hdr; + size_t object_size = binder_validate_object(buffer, *offp); + + if (object_size == 0) { + pr_err("transaction release %d bad object at offset %lld, size %zd\n", + debug_id, (u64)*offp, buffer->data_size); + continue; + } + hdr = (struct binder_object_header *)(buffer->data + *offp); + switch (hdr->type) { + case BINDER_TYPE_BINDER: + case BINDER_TYPE_WEAK_BINDER: { + struct flat_binder_object *fp; + struct binder_node *node; + + fp = to_flat_binder_object(hdr); + node = binder_get_node(proc, fp->binder); + if (node == NULL) { + pr_err("transaction release %d bad node %016llx\n", + debug_id, (u64)fp->binder); + break; + } + binder_debug(BINDER_DEBUG_TRANSACTION, + " node %d u%016llx\n", + node->debug_id, (u64)node->ptr); + binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, + 0); + binder_put_node(node); + } break; + case BINDER_TYPE_HANDLE: + case BINDER_TYPE_WEAK_HANDLE: { + struct flat_binder_object *fp; + struct binder_ref_data rdata; + int ret; + + fp = to_flat_binder_object(hdr); + ret = binder_dec_ref_for_handle(proc, fp->handle, + hdr->type == BINDER_TYPE_HANDLE, &rdata); + + if (ret) { + pr_err("transaction release %d bad handle %d, ret = %d\n", + debug_id, fp->handle, ret); + break; + } + binder_debug(BINDER_DEBUG_TRANSACTION, + " ref %d desc %d\n", + rdata.debug_id, rdata.desc); + } break; + + case BINDER_TYPE_FD: { + struct binder_fd_object *fp = 
to_binder_fd_object(hdr); + + binder_debug(BINDER_DEBUG_TRANSACTION, + " fd %d\n", fp->fd); + if (failed_at) + task_close_fd(proc, fp->fd); + } break; + case BINDER_TYPE_PTR: + /* + * Nothing to do here, this will get cleaned up when the + * transaction buffer gets freed + */ + break; + case BINDER_TYPE_FDA: { + struct binder_fd_array_object *fda; + struct binder_buffer_object *parent; + uintptr_t parent_buffer; + u32 *fd_array; + size_t fd_index; + binder_size_t fd_buf_size; + + fda = to_binder_fd_array_object(hdr); + parent = binder_validate_ptr(buffer, fda->parent, + off_start, + offp - off_start); + if (!parent) { + pr_err("transaction release %d bad parent offset", + debug_id); + continue; + } + /* + * Since the parent was already fixed up, convert it + * back to kernel address space to access it + */ + parent_buffer = parent->buffer - + binder_alloc_get_user_buffer_offset( + &proc->alloc); + + fd_buf_size = sizeof(u32) * fda->num_fds; + if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { + pr_err("transaction release %d invalid number of fds (%lld)\n", + debug_id, (u64)fda->num_fds); + continue; + } + if (fd_buf_size > parent->length || + fda->parent_offset > parent->length - fd_buf_size) { + /* No space for all file descriptors here. */ + pr_err("transaction release %d not enough space for %lld fds in buffer\n", + debug_id, (u64)fda->num_fds); + continue; + } + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); + for (fd_index = 0; fd_index < fda->num_fds; fd_index++) + task_close_fd(proc, fd_array[fd_index]); + } break; + default: + pr_err("transaction release %d bad object type %x\n", + debug_id, hdr->type); + break; + } + } +} + +static int binder_translate_binder(struct flat_binder_object *fp, + struct binder_transaction *t, + struct binder_thread *thread) +{ + struct binder_node *node; + struct binder_proc *proc = thread->proc; + struct binder_proc *target_proc = t->to_proc; + struct binder_ref_data rdata; + int ret = 0; + + node = binder_get_node(proc, fp->binder); + if (!node) { + node = binder_new_node(proc, fp); + if (!node) + return -ENOMEM; + } + if (fp->cookie != node->cookie) { + binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", + proc->pid, thread->pid, (u64)fp->binder, + node->debug_id, (u64)fp->cookie, + (u64)node->cookie); + ret = -EINVAL; + goto done; + } + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + ret = -EPERM; + goto done; + } + + ret = binder_inc_ref_for_node(target_proc, node, + fp->hdr.type == BINDER_TYPE_BINDER, + &thread->todo, &rdata); + if (ret) + goto done; + + if (fp->hdr.type == BINDER_TYPE_BINDER) + fp->hdr.type = BINDER_TYPE_HANDLE; + else + fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; + fp->binder = 0; + fp->handle = rdata.desc; + fp->cookie = 0; + + trace_binder_transaction_node_to_ref(t, node, &rdata); + binder_debug(BINDER_DEBUG_TRANSACTION, + " node %d u%016llx -> ref %d desc %d\n", + node->debug_id, (u64)node->ptr, + rdata.debug_id, rdata.desc); +done: + binder_put_node(node); + return ret; +} + +static int binder_translate_handle(struct flat_binder_object *fp, + struct binder_transaction *t, + struct binder_thread *thread) +{ + struct binder_proc *proc = thread->proc; + struct binder_proc *target_proc = t->to_proc; + struct binder_node *node; + struct binder_ref_data src_rdata; + int ret = 0; + + node = binder_get_node_from_ref(proc, fp->handle, + fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); + if (!node) { + binder_user_error("%d:%d got transaction with invalid handle, 
%d\n", + proc->pid, thread->pid, fp->handle); + return -EINVAL; + } + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + ret = -EPERM; + goto done; + } + + binder_node_lock(node); + if (node->proc == target_proc) { + if (fp->hdr.type == BINDER_TYPE_HANDLE) + fp->hdr.type = BINDER_TYPE_BINDER; + else + fp->hdr.type = BINDER_TYPE_WEAK_BINDER; + fp->binder = node->ptr; + fp->cookie = node->cookie; + if (node->proc) + binder_inner_proc_lock(node->proc); + binder_inc_node_nilocked(node, + fp->hdr.type == BINDER_TYPE_BINDER, + 0, NULL); + if (node->proc) + binder_inner_proc_unlock(node->proc); + trace_binder_transaction_ref_to_node(t, node, &src_rdata); + binder_debug(BINDER_DEBUG_TRANSACTION, + " ref %d desc %d -> node %d u%016llx\n", + src_rdata.debug_id, src_rdata.desc, node->debug_id, + (u64)node->ptr); + binder_node_unlock(node); + } else { + struct binder_ref_data dest_rdata; + + binder_node_unlock(node); + ret = binder_inc_ref_for_node(target_proc, node, + fp->hdr.type == BINDER_TYPE_HANDLE, + NULL, &dest_rdata); + if (ret) + goto done; + + fp->binder = 0; + fp->handle = dest_rdata.desc; + fp->cookie = 0; + trace_binder_transaction_ref_to_ref(t, node, &src_rdata, + &dest_rdata); + binder_debug(BINDER_DEBUG_TRANSACTION, + " ref %d desc %d -> ref %d desc %d (node %d)\n", + src_rdata.debug_id, src_rdata.desc, + dest_rdata.debug_id, dest_rdata.desc, + node->debug_id); + } +done: + binder_put_node(node); + return ret; +} + +static int binder_translate_fd(int fd, + struct binder_transaction *t, + struct binder_thread *thread, + struct binder_transaction *in_reply_to) +{ + struct binder_proc *proc = thread->proc; + struct binder_proc *target_proc = t->to_proc; + int target_fd; + struct file *file; + int ret; + bool target_allows_fd; + + if (in_reply_to) + target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); + else + target_allows_fd = t->buffer->target_node->accept_fds; + if (!target_allows_fd) { + binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", + proc->pid, thread->pid, + in_reply_to ? 
"reply" : "transaction", + fd); + ret = -EPERM; + goto err_fd_not_accepted; + } + + file = fget(fd); + if (!file) { + binder_user_error("%d:%d got transaction with invalid fd, %d\n", + proc->pid, thread->pid, fd); + ret = -EBADF; + goto err_fget; + } + ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); + if (ret < 0) { + ret = -EPERM; + goto err_security; + } + + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); + if (target_fd < 0) { + ret = -ENOMEM; + goto err_get_unused_fd; + } + task_fd_install(target_proc, target_fd, file); + trace_binder_transaction_fd(t, fd, target_fd); + binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", + fd, target_fd); + + return target_fd; + +err_get_unused_fd: +err_security: + fput(file); +err_fget: +err_fd_not_accepted: + return ret; +} + +static int binder_translate_fd_array(struct binder_fd_array_object *fda, + struct binder_buffer_object *parent, + struct binder_transaction *t, + struct binder_thread *thread, + struct binder_transaction *in_reply_to) +{ + binder_size_t fdi, fd_buf_size, num_installed_fds; + int target_fd; + uintptr_t parent_buffer; + u32 *fd_array; + struct binder_proc *proc = thread->proc; + struct binder_proc *target_proc = t->to_proc; + + fd_buf_size = sizeof(u32) * fda->num_fds; + if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { + binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", + proc->pid, thread->pid, (u64)fda->num_fds); + return -EINVAL; + } + if (fd_buf_size > parent->length || + fda->parent_offset > parent->length - fd_buf_size) { + /* No space for all file descriptors here. */ + binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", + proc->pid, thread->pid, (u64)fda->num_fds); + return -EINVAL; + } + /* + * Since the parent was already fixed up, convert it + * back to the kernel address space to access it + */ + parent_buffer = parent->buffer - + binder_alloc_get_user_buffer_offset(&target_proc->alloc); + fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); + if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { + binder_user_error("%d:%d parent offset not aligned correctly.\n", + proc->pid, thread->pid); + return -EINVAL; + } + for (fdi = 0; fdi < fda->num_fds; fdi++) { + target_fd = binder_translate_fd(fd_array[fdi], t, thread, + in_reply_to); + if (target_fd < 0) + goto err_translate_fd_failed; + fd_array[fdi] = target_fd; + } + return 0; + +err_translate_fd_failed: + /* + * Failed to allocate fd or security error, free fds + * installed so far. 
+ */ + num_installed_fds = fdi; + for (fdi = 0; fdi < num_installed_fds; fdi++) + task_close_fd(target_proc, fd_array[fdi]); + return target_fd; +} + +static int binder_fixup_parent(struct binder_transaction *t, + struct binder_thread *thread, + struct binder_buffer_object *bp, + binder_size_t *off_start, + binder_size_t num_valid, + struct binder_buffer_object *last_fixup_obj, + binder_size_t last_fixup_min_off) +{ + struct binder_buffer_object *parent; + u8 *parent_buffer; + struct binder_buffer *b = t->buffer; + struct binder_proc *proc = thread->proc; + struct binder_proc *target_proc = t->to_proc; + + if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) + return 0; + + parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); + if (!parent) { + binder_user_error("%d:%d got transaction with invalid parent offset or type\n", + proc->pid, thread->pid); + return -EINVAL; + } + + if (!binder_validate_fixup(b, off_start, + parent, bp->parent_offset, + last_fixup_obj, + last_fixup_min_off)) { + binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", + proc->pid, thread->pid); + return -EINVAL; + } + + if (parent->length < sizeof(binder_uintptr_t) || + bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) { + /* No space for a pointer here! */ + binder_user_error("%d:%d got transaction with invalid parent offset\n", + proc->pid, thread->pid); + return -EINVAL; + } + parent_buffer = (u8 *)((uintptr_t)parent->buffer - + binder_alloc_get_user_buffer_offset( + &target_proc->alloc)); + *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; + + return 0; +} + +/** + * binder_proc_transaction() - sends a transaction to a process and wakes it up + * @t: transaction to send + * @proc: process to send the transaction to + * @thread: thread in @proc to send the transaction to (may be NULL) + * + * This function queues a transaction to the specified process. It will try + * to find a thread in the target process to handle the transaction and + * wake it up. If no thread is found, the work is queued to the proc + * waitqueue. + * + * If the @thread parameter is not NULL, the transaction is always queued + * to the waitlist of that specific thread. 
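+ * A one-way transaction whose target node already has an async transaction
+ * in flight is parked on the node's async_todo list and does not trigger a
+ * wakeup.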
+ * + * Return: true if the transactions was successfully queued + * false if the target process or thread is dead + */ +static bool binder_proc_transaction(struct binder_transaction *t, + struct binder_proc *proc, + struct binder_thread *thread) +{ + struct list_head *target_list = NULL; + struct binder_node *node = t->buffer->target_node; + bool oneway = !!(t->flags & TF_ONE_WAY); + bool wakeup = true; + + BUG_ON(!node); + binder_node_lock(node); + if (oneway) { + BUG_ON(thread); + if (node->has_async_transaction) { + target_list = &node->async_todo; + wakeup = false; + } else { + node->has_async_transaction = 1; + } + } + + binder_inner_proc_lock(proc); + + if (proc->is_dead || (thread && thread->is_dead)) { + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + return false; + } + + if (!thread && !target_list) + thread = binder_select_thread_ilocked(proc); + + if (thread) + target_list = &thread->todo; + else if (!target_list) + target_list = &proc->todo; + else + BUG_ON(target_list != &node->async_todo); + + binder_enqueue_work_ilocked(&t->work, target_list); + + if (wakeup) + binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); + + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + + return true; +} + +/** + * binder_get_node_refs_for_txn() - Get required refs on node for txn + * @node: struct binder_node for which to get refs + * @proc: returns @node->proc if valid + * @error: if no @proc then returns BR_DEAD_REPLY + * + * User-space normally keeps the node alive when creating a transaction + * since it has a reference to the target. The local strong ref keeps it + * alive if the sending process dies before the target process processes + * the transaction. If the source process is malicious or has a reference + * counting bug, relying on the local strong ref can fail. + * + * Since user-space can cause the local strong ref to go away, we also take + * a tmpref on the node to ensure it survives while we are constructing + * the transaction. We also need a tmpref on the proc while we are + * constructing the transaction, so we take that here as well. + * + * Return: The target_node with refs taken or NULL if no @node->proc is NULL. + * Also sets @proc if valid. 
If the @node->proc is NULL indicating that the + * target proc has died, @error is set to BR_DEAD_REPLY + */ +static struct binder_node *binder_get_node_refs_for_txn( + struct binder_node *node, + struct binder_proc **procp, + uint32_t *error) +{ + struct binder_node *target_node = NULL; + + binder_node_inner_lock(node); + if (node->proc) { + target_node = node; + binder_inc_node_nilocked(node, 1, 0, NULL); + binder_inc_node_tmpref_ilocked(node); + node->proc->tmp_ref++; + *procp = node->proc; + } else + *error = BR_DEAD_REPLY; + binder_node_inner_unlock(node); + + return target_node; +} + +static void binder_transaction(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_transaction_data *tr, int reply, + binder_size_t extra_buffers_size) +{ + int ret; + struct binder_transaction *t; + struct binder_work *tcomplete; + binder_size_t *offp, *off_end, *off_start; + binder_size_t off_min; + u8 *sg_bufp, *sg_buf_end; + struct binder_proc *target_proc = NULL; + struct binder_thread *target_thread = NULL; + struct binder_node *target_node = NULL; + struct binder_transaction *in_reply_to = NULL; + struct binder_transaction_log_entry *e; + uint32_t return_error = 0; + uint32_t return_error_param = 0; + uint32_t return_error_line = 0; + struct binder_buffer_object *last_fixup_obj = NULL; + binder_size_t last_fixup_min_off = 0; + struct binder_context *context = proc->context; + int t_debug_id = atomic_inc_return(&binder_last_id); + + e = binder_transaction_log_add(&binder_transaction_log); + e->debug_id = t_debug_id; + e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); + e->from_proc = proc->pid; + e->from_thread = thread->pid; + e->target_handle = tr->target.handle; + e->data_size = tr->data_size; + e->offsets_size = tr->offsets_size; + e->context_name = proc->context->name; + + if (reply) { + binder_inner_proc_lock(proc); + in_reply_to = thread->transaction_stack; + if (in_reply_to == NULL) { + binder_inner_proc_unlock(proc); + binder_user_error("%d:%d got reply transaction with no transaction stack\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; + goto err_empty_call_stack; + } + if (in_reply_to->to_thread != thread) { + spin_lock(&in_reply_to->lock); + binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", + proc->pid, thread->pid, in_reply_to->debug_id, + in_reply_to->to_proc ? + in_reply_to->to_proc->pid : 0, + in_reply_to->to_thread ? + in_reply_to->to_thread->pid : 0); + spin_unlock(&in_reply_to->lock); + binder_inner_proc_unlock(proc); + return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; + in_reply_to = NULL; + goto err_bad_call_stack; + } + thread->transaction_stack = in_reply_to->to_parent; + binder_inner_proc_unlock(proc); + binder_set_nice(in_reply_to->saved_priority); + target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); + if (target_thread == NULL) { + return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; + goto err_dead_binder; + } + if (target_thread->transaction_stack != in_reply_to) { + binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", + proc->pid, thread->pid, + target_thread->transaction_stack ? 
+ target_thread->transaction_stack->debug_id : 0, + in_reply_to->debug_id); + binder_inner_proc_unlock(target_thread->proc); + return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; + in_reply_to = NULL; + target_thread = NULL; + goto err_dead_binder; + } + target_proc = target_thread->proc; + target_proc->tmp_ref++; + binder_inner_proc_unlock(target_thread->proc); + } else { + if (tr->target.handle) { + struct binder_ref *ref; + + /* + * There must already be a strong ref + * on this node. If so, do a strong + * increment on the node to ensure it + * stays alive until the transaction is + * done. + */ + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, tr->target.handle, + true); + if (ref) { + target_node = binder_get_node_refs_for_txn( + ref->node, &target_proc, + &return_error); + } else { + binder_user_error("%d:%d got transaction to invalid handle\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + } + binder_proc_unlock(proc); + } else { + mutex_lock(&context->context_mgr_node_lock); + target_node = context->binder_context_mgr_node; + if (target_node) + target_node = binder_get_node_refs_for_txn( + target_node, &target_proc, + &return_error); + else + return_error = BR_DEAD_REPLY; + mutex_unlock(&context->context_mgr_node_lock); + } + if (!target_node) { + /* + * return_error is set above + */ + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_dead_binder; + } + e->to_node = target_node->debug_id; + if (security_binder_transaction(proc->tsk, + target_proc->tsk) < 0) { + return_error = BR_FAILED_REPLY; + return_error_param = -EPERM; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } + binder_inner_proc_lock(proc); + if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { + struct binder_transaction *tmp; + + tmp = thread->transaction_stack; + if (tmp->to_thread != thread) { + spin_lock(&tmp->lock); + binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", + proc->pid, thread->pid, tmp->debug_id, + tmp->to_proc ? tmp->to_proc->pid : 0, + tmp->to_thread ? 
+ tmp->to_thread->pid : 0); + spin_unlock(&tmp->lock); + binder_inner_proc_unlock(proc); + return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; + goto err_bad_call_stack; + } + while (tmp) { + struct binder_thread *from; + + spin_lock(&tmp->lock); + from = tmp->from; + if (from && from->proc == target_proc) { + atomic_inc(&from->tmp_ref); + target_thread = from; + spin_unlock(&tmp->lock); + break; + } + spin_unlock(&tmp->lock); + tmp = tmp->from_parent; + } + } + binder_inner_proc_unlock(proc); + } + if (target_thread) + e->to_thread = target_thread->pid; + e->to_proc = target_proc->pid; + + /* TODO: reuse incoming transaction for reply */ + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (t == NULL) { + return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; + goto err_alloc_t_failed; + } + binder_stats_created(BINDER_STAT_TRANSACTION); + spin_lock_init(&t->lock); + + tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); + if (tcomplete == NULL) { + return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; + goto err_alloc_tcomplete_failed; + } + binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); + + t->debug_id = t_debug_id; + + if (reply) + binder_debug(BINDER_DEBUG_TRANSACTION, + "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", + proc->pid, thread->pid, t->debug_id, + target_proc->pid, target_thread->pid, + (u64)tr->data.ptr.buffer, + (u64)tr->data.ptr.offsets, + (u64)tr->data_size, (u64)tr->offsets_size, + (u64)extra_buffers_size); + else + binder_debug(BINDER_DEBUG_TRANSACTION, + "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", + proc->pid, thread->pid, t->debug_id, + target_proc->pid, target_node->debug_id, + (u64)tr->data.ptr.buffer, + (u64)tr->data.ptr.offsets, + (u64)tr->data_size, (u64)tr->offsets_size, + (u64)extra_buffers_size); + + if (!reply && !(tr->flags & TF_ONE_WAY)) + t->from = thread; + else + t->from = NULL; + t->sender_euid = task_euid(proc->tsk); + t->to_proc = target_proc; + t->to_thread = target_thread; + t->code = tr->code; + t->flags = tr->flags; + t->priority = task_nice(current); + + trace_binder_transaction(reply, t, target_node); + + t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, + tr->offsets_size, extra_buffers_size, + !reply && (t->flags & TF_ONE_WAY)); + if (IS_ERR(t->buffer)) { + /* + * -ESRCH indicates VMA cleared. The target is dying. + */ + return_error_param = PTR_ERR(t->buffer); + return_error = return_error_param == -ESRCH ? 
+ BR_DEAD_REPLY : BR_FAILED_REPLY; + return_error_line = __LINE__; + t->buffer = NULL; + goto err_binder_alloc_buf_failed; + } + t->buffer->allow_user_free = 0; + t->buffer->debug_id = t->debug_id; + t->buffer->transaction = t; + t->buffer->target_node = target_node; + trace_binder_transaction_alloc_buf(t->buffer); + off_start = (binder_size_t *)(t->buffer->data + + ALIGN(tr->data_size, sizeof(void *))); + offp = off_start; + + if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) + tr->data.ptr.buffer, tr->data_size)) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + if (copy_from_user(offp, (const void __user *)(uintptr_t) + tr->data.ptr.offsets, tr->offsets_size)) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { + binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", + proc->pid, thread->pid, (u64)tr->offsets_size); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_offset; + } + if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { + binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", + proc->pid, thread->pid, + (u64)extra_buffers_size); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_offset; + } + off_end = (void *)off_start + tr->offsets_size; + sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); + sg_buf_end = sg_bufp + extra_buffers_size; + off_min = 0; + for (; offp < off_end; offp++) { + struct binder_object_header *hdr; + size_t object_size = binder_validate_object(t->buffer, *offp); + + if (object_size == 0 || *offp < off_min) { + binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", + proc->pid, thread->pid, (u64)*offp, + (u64)off_min, + (u64)t->buffer->data_size); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_offset; + } + + hdr = (struct binder_object_header *)(t->buffer->data + *offp); + off_min = *offp + object_size; + switch (hdr->type) { + case BINDER_TYPE_BINDER: + case BINDER_TYPE_WEAK_BINDER: { + struct flat_binder_object *fp; + + fp = to_flat_binder_object(hdr); + ret = binder_translate_binder(fp, t, thread); + if (ret < 0) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_translate_failed; + } + } break; + case BINDER_TYPE_HANDLE: + case BINDER_TYPE_WEAK_HANDLE: { + struct flat_binder_object *fp; + + fp = to_flat_binder_object(hdr); + ret = binder_translate_handle(fp, t, thread); + if (ret < 0) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_translate_failed; + } + } break; + + case BINDER_TYPE_FD: { + struct binder_fd_object *fp = to_binder_fd_object(hdr); + int target_fd = binder_translate_fd(fp->fd, t, thread, + in_reply_to); + + if (target_fd < 0) { + return_error = BR_FAILED_REPLY; + return_error_param = target_fd; + return_error_line = __LINE__; + goto err_translate_failed; + } + fp->pad_binder = 0; + fp->fd = target_fd; + } 
break; + case BINDER_TYPE_FDA: { + struct binder_fd_array_object *fda = + to_binder_fd_array_object(hdr); + struct binder_buffer_object *parent = + binder_validate_ptr(t->buffer, fda->parent, + off_start, + offp - off_start); + if (!parent) { + binder_user_error("%d:%d got transaction with invalid parent offset or type\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + if (!binder_validate_fixup(t->buffer, off_start, + parent, fda->parent_offset, + last_fixup_obj, + last_fixup_min_off)) { + binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + ret = binder_translate_fd_array(fda, parent, t, thread, + in_reply_to); + if (ret < 0) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_translate_failed; + } + last_fixup_obj = parent; + last_fixup_min_off = + fda->parent_offset + sizeof(u32) * fda->num_fds; + } break; + case BINDER_TYPE_PTR: { + struct binder_buffer_object *bp = + to_binder_buffer_object(hdr); + size_t buf_left = sg_buf_end - sg_bufp; + + if (bp->length > buf_left) { + binder_user_error("%d:%d got transaction with too large buffer\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_offset; + } + if (copy_from_user(sg_bufp, + (const void __user *)(uintptr_t) + bp->buffer, bp->length)) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error_param = -EFAULT; + return_error = BR_FAILED_REPLY; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + /* Fixup buffer pointer to target proc address space */ + bp->buffer = (uintptr_t)sg_bufp + + binder_alloc_get_user_buffer_offset( + &target_proc->alloc); + sg_bufp += ALIGN(bp->length, sizeof(u64)); + + ret = binder_fixup_parent(t, thread, bp, off_start, + offp - off_start, + last_fixup_obj, + last_fixup_min_off); + if (ret < 0) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_translate_failed; + } + last_fixup_obj = bp; + last_fixup_min_off = 0; + } break; + default: + binder_user_error("%d:%d got transaction with invalid object type, %x\n", + proc->pid, thread->pid, hdr->type); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_object_type; + } + } + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + binder_enqueue_work(proc, tcomplete, &thread->todo); + t->work.type = BINDER_WORK_TRANSACTION; + + if (reply) { + binder_inner_proc_lock(target_proc); + if (target_thread->is_dead) { + binder_inner_proc_unlock(target_proc); + goto err_dead_proc_or_thread; + } + BUG_ON(t->buffer->async_transaction != 0); + binder_pop_transaction_ilocked(target_thread, in_reply_to); + binder_enqueue_work_ilocked(&t->work, &target_thread->todo); + binder_inner_proc_unlock(target_proc); + wake_up_interruptible_sync(&target_thread->wait); + binder_free_transaction(in_reply_to); + } else if (!(t->flags & TF_ONE_WAY)) { + BUG_ON(t->buffer->async_transaction != 0); + binder_inner_proc_lock(proc); + t->need_reply = 1; + t->from_parent = thread->transaction_stack; + thread->transaction_stack = t; + binder_inner_proc_unlock(proc); + if 
(!binder_proc_transaction(t, target_proc, target_thread)) { + binder_inner_proc_lock(proc); + binder_pop_transaction_ilocked(thread, t); + binder_inner_proc_unlock(proc); + goto err_dead_proc_or_thread; + } + } else { + BUG_ON(target_node == NULL); + BUG_ON(t->buffer->async_transaction != 1); + if (!binder_proc_transaction(t, target_proc, NULL)) + goto err_dead_proc_or_thread; + } + if (target_thread) + binder_thread_dec_tmpref(target_thread); + binder_proc_dec_tmpref(target_proc); + if (target_node) + binder_dec_node_tmpref(target_node); + /* + * write barrier to synchronize with initialization + * of log entry + */ + smp_wmb(); + WRITE_ONCE(e->debug_id_done, t_debug_id); + return; + +err_dead_proc_or_thread: + return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; + binder_dequeue_work(proc, tcomplete); +err_translate_failed: +err_bad_object_type: +err_bad_offset: +err_bad_parent: +err_copy_data_failed: + trace_binder_transaction_failed_buffer_release(t->buffer); + binder_transaction_buffer_release(target_proc, t->buffer, offp); + if (target_node) + binder_dec_node_tmpref(target_node); + target_node = NULL; + t->buffer->transaction = NULL; + binder_alloc_free_buf(&target_proc->alloc, t->buffer); +err_binder_alloc_buf_failed: + kfree(tcomplete); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); +err_alloc_tcomplete_failed: + kfree(t); + binder_stats_deleted(BINDER_STAT_TRANSACTION); +err_alloc_t_failed: +err_bad_call_stack: +err_empty_call_stack: +err_dead_binder: +err_invalid_target_handle: + if (target_thread) + binder_thread_dec_tmpref(target_thread); + if (target_proc) + binder_proc_dec_tmpref(target_proc); + if (target_node) { + binder_dec_node(target_node, 1, 0); + binder_dec_node_tmpref(target_node); + } + + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", + proc->pid, thread->pid, return_error, return_error_param, + (u64)tr->data_size, (u64)tr->offsets_size, + return_error_line); + + { + struct binder_transaction_log_entry *fe; + + e->return_error = return_error; + e->return_error_param = return_error_param; + e->return_error_line = return_error_line; + fe = binder_transaction_log_add(&binder_transaction_log_failed); + *fe = *e; + /* + * write barrier to synchronize with initialization + * of log entry + */ + smp_wmb(); + WRITE_ONCE(e->debug_id_done, t_debug_id); + WRITE_ONCE(fe->debug_id_done, t_debug_id); + } + + BUG_ON(thread->return_error.cmd != BR_OK); + if (in_reply_to) { + thread->return_error.cmd = BR_TRANSACTION_COMPLETE; + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); + binder_send_failed_reply(in_reply_to, return_error); + } else { + thread->return_error.cmd = return_error; + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); + } +} + +static int binder_thread_write(struct binder_proc *proc, + struct binder_thread *thread, + binder_uintptr_t binder_buffer, size_t size, + binder_size_t *consumed) +{ + uint32_t cmd; + struct binder_context *context = proc->context; + void __user *buffer = (void __user *)(uintptr_t)binder_buffer; + void __user *ptr = buffer + *consumed; + void __user *end = buffer + size; + + while (ptr < end && thread->return_error.cmd == BR_OK) { + int ret; + + if (get_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + trace_binder_command(cmd); + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { + atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); + atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); 
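+ /* count the command against the issuing thread as well */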
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); + } + switch (cmd) { + case BC_INCREFS: + case BC_ACQUIRE: + case BC_RELEASE: + case BC_DECREFS: { + uint32_t target; + const char *debug_string; + bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; + bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; + struct binder_ref_data rdata; + + if (get_user(target, (uint32_t __user *)ptr)) + return -EFAULT; + + ptr += sizeof(uint32_t); + ret = -1; + if (increment && !target) { + struct binder_node *ctx_mgr_node; + mutex_lock(&context->context_mgr_node_lock); + ctx_mgr_node = context->binder_context_mgr_node; + if (ctx_mgr_node) + ret = binder_inc_ref_for_node( + proc, ctx_mgr_node, + strong, NULL, &rdata); + mutex_unlock(&context->context_mgr_node_lock); + } + if (ret) + ret = binder_update_ref_for_handle( + proc, target, increment, strong, + &rdata); + if (!ret && rdata.desc != target) { + binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", + proc->pid, thread->pid, + target, rdata.desc); + } + switch (cmd) { + case BC_INCREFS: + debug_string = "IncRefs"; + break; + case BC_ACQUIRE: + debug_string = "Acquire"; + break; + case BC_RELEASE: + debug_string = "Release"; + break; + case BC_DECREFS: + default: + debug_string = "DecRefs"; + break; + } + if (ret) { + binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", + proc->pid, thread->pid, debug_string, + strong, target, ret); + break; + } + binder_debug(BINDER_DEBUG_USER_REFS, + "%d:%d %s ref %d desc %d s %d w %d\n", + proc->pid, thread->pid, debug_string, + rdata.debug_id, rdata.desc, rdata.strong, + rdata.weak); + break; + } + case BC_INCREFS_DONE: + case BC_ACQUIRE_DONE: { + binder_uintptr_t node_ptr; + binder_uintptr_t cookie; + struct binder_node *node; + bool free_node; + + if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + node = binder_get_node(proc, node_ptr); + if (node == NULL) { + binder_user_error("%d:%d %s u%016llx no match\n", + proc->pid, thread->pid, + cmd == BC_INCREFS_DONE ? + "BC_INCREFS_DONE" : + "BC_ACQUIRE_DONE", + (u64)node_ptr); + break; + } + if (cookie != node->cookie) { + binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", + proc->pid, thread->pid, + cmd == BC_INCREFS_DONE ? + "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", + (u64)node_ptr, node->debug_id, + (u64)cookie, (u64)node->cookie); + binder_put_node(node); + break; + } + binder_node_inner_lock(node); + if (cmd == BC_ACQUIRE_DONE) { + if (node->pending_strong_ref == 0) { + binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", + proc->pid, thread->pid, + node->debug_id); + binder_node_inner_unlock(node); + binder_put_node(node); + break; + } + node->pending_strong_ref = 0; + } else { + if (node->pending_weak_ref == 0) { + binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", + proc->pid, thread->pid, + node->debug_id); + binder_node_inner_unlock(node); + binder_put_node(node); + break; + } + node->pending_weak_ref = 0; + } + free_node = binder_dec_node_nilocked(node, + cmd == BC_ACQUIRE_DONE, 0); + WARN_ON(free_node); + binder_debug(BINDER_DEBUG_USER_REFS, + "%d:%d %s node %d ls %d lw %d tr %d\n", + proc->pid, thread->pid, + cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", + node->debug_id, node->local_strong_refs, + node->local_weak_refs, node->tmp_refs); + binder_node_inner_unlock(node); + binder_put_node(node); + break; + } + case BC_ATTEMPT_ACQUIRE: + pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); + return -EINVAL; + case BC_ACQUIRE_RESULT: + pr_err("BC_ACQUIRE_RESULT not supported\n"); + return -EINVAL; + + case BC_FREE_BUFFER: { + binder_uintptr_t data_ptr; + struct binder_buffer *buffer; + + if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + buffer = binder_alloc_prepare_to_free(&proc->alloc, + data_ptr); + if (buffer == NULL) { + binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", + proc->pid, thread->pid, (u64)data_ptr); + break; + } + if (!buffer->allow_user_free) { + binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", + proc->pid, thread->pid, (u64)data_ptr); + break; + } + binder_debug(BINDER_DEBUG_FREE_BUFFER, + "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", + proc->pid, thread->pid, (u64)data_ptr, + buffer->debug_id, + buffer->transaction ? "active" : "finished"); + + if (buffer->transaction) { + buffer->transaction->buffer = NULL; + buffer->transaction = NULL; + } + if (buffer->async_transaction && buffer->target_node) { + struct binder_node *buf_node; + struct binder_work *w; + + buf_node = buffer->target_node; + binder_node_inner_lock(buf_node); + BUG_ON(!buf_node->has_async_transaction); + BUG_ON(buf_node->proc != proc); + w = binder_dequeue_work_head_ilocked( + &buf_node->async_todo); + if (!w) { + buf_node->has_async_transaction = 0; + } else { + binder_enqueue_work_ilocked( + w, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + binder_node_inner_unlock(buf_node); + } + trace_binder_transaction_buffer_release(buffer); + binder_transaction_buffer_release(proc, buffer, NULL); + binder_alloc_free_buf(&proc->alloc, buffer); + break; + } + + case BC_TRANSACTION_SG: + case BC_REPLY_SG: { + struct binder_transaction_data_sg tr; + + if (copy_from_user(&tr, ptr, sizeof(tr))) + return -EFAULT; + ptr += sizeof(tr); + binder_transaction(proc, thread, &tr.transaction_data, + cmd == BC_REPLY_SG, tr.buffers_size); + break; + } + case BC_TRANSACTION: + case BC_REPLY: { + struct binder_transaction_data tr; + + if (copy_from_user(&tr, ptr, sizeof(tr))) + return -EFAULT; + ptr += sizeof(tr); + binder_transaction(proc, thread, &tr, + cmd == BC_REPLY, 0); + break; + } + + case BC_REGISTER_LOOPER: + binder_debug(BINDER_DEBUG_THREADS, + "%d:%d BC_REGISTER_LOOPER\n", + proc->pid, thread->pid); + binder_inner_proc_lock(proc); + if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { + thread->looper |= BINDER_LOOPER_STATE_INVALID; + binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", + proc->pid, thread->pid); + } else if (proc->requested_threads == 0) { + thread->looper |= BINDER_LOOPER_STATE_INVALID; + binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", + proc->pid, thread->pid); + } else { + proc->requested_threads--; + proc->requested_threads_started++; + } + thread->looper |= BINDER_LOOPER_STATE_REGISTERED; + binder_inner_proc_unlock(proc); + break; + case BC_ENTER_LOOPER: + binder_debug(BINDER_DEBUG_THREADS, + "%d:%d BC_ENTER_LOOPER\n", + proc->pid, thread->pid); + if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { + thread->looper |= BINDER_LOOPER_STATE_INVALID; + binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after 
BC_REGISTER_LOOPER\n", + proc->pid, thread->pid); + } + thread->looper |= BINDER_LOOPER_STATE_ENTERED; + break; + case BC_EXIT_LOOPER: + binder_debug(BINDER_DEBUG_THREADS, + "%d:%d BC_EXIT_LOOPER\n", + proc->pid, thread->pid); + thread->looper |= BINDER_LOOPER_STATE_EXITED; + break; + + case BC_REQUEST_DEATH_NOTIFICATION: + case BC_CLEAR_DEATH_NOTIFICATION: { + uint32_t target; + binder_uintptr_t cookie; + struct binder_ref *ref; + struct binder_ref_death *death = NULL; + + if (get_user(target, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { + /* + * Allocate memory for death notification + * before taking lock + */ + death = kzalloc(sizeof(*death), GFP_KERNEL); + if (death == NULL) { + WARN_ON(thread->return_error.cmd != + BR_OK); + thread->return_error.cmd = BR_ERROR; + binder_enqueue_work( + thread->proc, + &thread->return_error.work, + &thread->todo); + binder_debug( + BINDER_DEBUG_FAILED_TRANSACTION, + "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", + proc->pid, thread->pid); + break; + } + } + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, target, false); + if (ref == NULL) { + binder_user_error("%d:%d %s invalid ref %d\n", + proc->pid, thread->pid, + cmd == BC_REQUEST_DEATH_NOTIFICATION ? + "BC_REQUEST_DEATH_NOTIFICATION" : + "BC_CLEAR_DEATH_NOTIFICATION", + target); + binder_proc_unlock(proc); + kfree(death); + break; + } + + binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, + "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", + proc->pid, thread->pid, + cmd == BC_REQUEST_DEATH_NOTIFICATION ? + "BC_REQUEST_DEATH_NOTIFICATION" : + "BC_CLEAR_DEATH_NOTIFICATION", + (u64)cookie, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak, ref->node->debug_id); + + binder_node_lock(ref->node); + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { + if (ref->death) { + binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", + proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + kfree(death); + break; + } + binder_stats_created(BINDER_STAT_DEATH); + INIT_LIST_HEAD(&death->work.entry); + death->cookie = cookie; + ref->death = death; + if (ref->node->proc == NULL) { + ref->death->work.type = BINDER_WORK_DEAD_BINDER; + + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked( + &ref->death->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); + } + } else { + if (ref->death == NULL) { + binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", + proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + break; + } + death = ref->death; + if (death->cookie != cookie) { + binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", + proc->pid, thread->pid, + (u64)death->cookie, + (u64)cookie); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + break; + } + ref->death = NULL; + binder_inner_proc_lock(proc); + if (list_empty(&death->work.entry)) { + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work_ilocked( + &death->work, + &thread->todo); + else { + binder_enqueue_work_ilocked( + &death->work, + &proc->todo); + binder_wakeup_proc_ilocked( + proc); + } 
+ } else { + BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); + death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; + } + binder_inner_proc_unlock(proc); + } + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + } break; + case BC_DEAD_BINDER_DONE: { + struct binder_work *w; + binder_uintptr_t cookie; + struct binder_ref_death *death = NULL; + + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + + ptr += sizeof(cookie); + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->delivered_death, + entry) { + struct binder_ref_death *tmp_death = + container_of(w, + struct binder_ref_death, + work); + + if (tmp_death->cookie == cookie) { + death = tmp_death; + break; + } + } + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", + proc->pid, thread->pid, (u64)cookie, + death); + if (death == NULL) { + binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", + proc->pid, thread->pid, (u64)cookie); + binder_inner_proc_unlock(proc); + break; + } + binder_dequeue_work_ilocked(&death->work); + if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work_ilocked( + &death->work, &thread->todo); + else { + binder_enqueue_work_ilocked( + &death->work, + &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + } + binder_inner_proc_unlock(proc); + } break; + + default: + pr_err("%d:%d unknown command %d\n", + proc->pid, thread->pid, cmd); + return -EINVAL; + } + *consumed = ptr - buffer; + } + return 0; +} + +static void binder_stat_br(struct binder_proc *proc, + struct binder_thread *thread, uint32_t cmd) +{ + trace_binder_return(cmd); + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { + atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); + atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); + atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); + } +} + +static int binder_put_node_cmd(struct binder_proc *proc, + struct binder_thread *thread, + void __user **ptrp, + binder_uintptr_t node_ptr, + binder_uintptr_t node_cookie, + int node_debug_id, + uint32_t cmd, const char *cmd_name) +{ + void __user *ptr = *ptrp; + + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + + if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", + proc->pid, thread->pid, cmd_name, node_debug_id, + (u64)node_ptr, (u64)node_cookie); + + *ptrp = ptr; + return 0; +} + +static int binder_wait_for_work(struct binder_thread *thread, + bool do_proc_work) +{ + DEFINE_WAIT(wait); + struct binder_proc *proc = thread->proc; + int ret = 0; + + freezer_do_not_count(); + binder_inner_proc_lock(proc); + for (;;) { + prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); + if (binder_has_work_ilocked(thread, do_proc_work)) + break; + if (do_proc_work) + list_add(&thread->waiting_thread_node, + &proc->waiting_threads); + binder_inner_proc_unlock(proc); + schedule(); + binder_inner_proc_lock(proc); + list_del_init(&thread->waiting_thread_node); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + finish_wait(&thread->wait, &wait); + binder_inner_proc_unlock(proc); + 
freezer_count(); + + return ret; +} + +static int binder_thread_read(struct binder_proc *proc, + struct binder_thread *thread, + binder_uintptr_t binder_buffer, size_t size, + binder_size_t *consumed, int non_block) +{ + void __user *buffer = (void __user *)(uintptr_t)binder_buffer; + void __user *ptr = buffer + *consumed; + void __user *end = buffer + size; + + int ret = 0; + int wait_for_proc_work; + + if (*consumed == 0) { + if (put_user(BR_NOOP, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + } + +retry: + binder_inner_proc_lock(proc); + wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); + binder_inner_proc_unlock(proc); + + thread->looper |= BINDER_LOOPER_STATE_WAITING; + + trace_binder_wait_for_work(wait_for_proc_work, + !!thread->transaction_stack, + !binder_worklist_empty(proc, &thread->todo)); + if (wait_for_proc_work) { + if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED))) { + binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", + proc->pid, thread->pid, thread->looper); + wait_event_interruptible(binder_user_error_wait, + binder_stop_on_user_error < 2); + } + binder_set_nice(proc->default_priority); + } + + if (non_block) { + if (!binder_has_work(thread, wait_for_proc_work)) + ret = -EAGAIN; + } else { + ret = binder_wait_for_work(thread, wait_for_proc_work); + } + + thread->looper &= ~BINDER_LOOPER_STATE_WAITING; + + if (ret) + return ret; + + while (1) { + uint32_t cmd; + struct binder_transaction_data tr; + struct binder_work *w = NULL; + struct list_head *list = NULL; + struct binder_transaction *t = NULL; + struct binder_thread *t_from; + + binder_inner_proc_lock(proc); + if (!binder_worklist_empty_ilocked(&thread->todo)) + list = &thread->todo; + else if (!binder_worklist_empty_ilocked(&proc->todo) && + wait_for_proc_work) + list = &proc->todo; + else { + binder_inner_proc_unlock(proc); + + /* no data added */ + if (ptr - buffer == 4 && !thread->looper_need_return) + goto retry; + break; + } + + if (end - ptr < sizeof(tr) + 4) { + binder_inner_proc_unlock(proc); + break; + } + w = binder_dequeue_work_head_ilocked(list); + + switch (w->type) { + case BINDER_WORK_TRANSACTION: { + binder_inner_proc_unlock(proc); + t = container_of(w, struct binder_transaction, work); + } break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + WARN_ON(e->cmd == BR_OK); + binder_inner_proc_unlock(proc); + if (put_user(e->cmd, (uint32_t __user *)ptr)) + return -EFAULT; + e->cmd = BR_OK; + ptr += sizeof(uint32_t); + + binder_stat_br(proc, thread, e->cmd); + } break; + case BINDER_WORK_TRANSACTION_COMPLETE: { + binder_inner_proc_unlock(proc); + cmd = BR_TRANSACTION_COMPLETE; + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, + "%d:%d BR_TRANSACTION_COMPLETE\n", + proc->pid, thread->pid); + kfree(w); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); + } break; + case BINDER_WORK_NODE: { + struct binder_node *node = container_of(w, struct binder_node, work); + int strong, weak; + binder_uintptr_t node_ptr = node->ptr; + binder_uintptr_t node_cookie = node->cookie; + int node_debug_id = node->debug_id; + int has_weak_ref; + int has_strong_ref; + void __user *orig_ptr = ptr; + + BUG_ON(proc != node->proc); + strong = node->internal_strong_refs || + 
node->local_strong_refs; + weak = !hlist_empty(&node->refs) || + node->local_weak_refs || + node->tmp_refs || strong; + has_strong_ref = node->has_strong_ref; + has_weak_ref = node->has_weak_ref; + + if (weak && !has_weak_ref) { + node->has_weak_ref = 1; + node->pending_weak_ref = 1; + node->local_weak_refs++; + } + if (strong && !has_strong_ref) { + node->has_strong_ref = 1; + node->pending_strong_ref = 1; + node->local_strong_refs++; + } + if (!strong && has_strong_ref) + node->has_strong_ref = 0; + if (!weak && has_weak_ref) + node->has_weak_ref = 0; + if (!weak && !strong) { + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx deleted\n", + proc->pid, thread->pid, + node_debug_id, + (u64)node_ptr, + (u64)node_cookie); + rb_erase(&node->rb_node, &proc->nodes); + binder_inner_proc_unlock(proc); + binder_node_lock(node); + /* + * Acquire the node lock before freeing the + * node to serialize with other threads that + * may have been holding the node lock while + * decrementing this node (avoids race where + * this thread frees while the other thread + * is unlocking the node after the final + * decrement) + */ + binder_node_unlock(node); + binder_free_node(node); + } else + binder_inner_proc_unlock(proc); + + if (weak && !has_weak_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_INCREFS, "BR_INCREFS"); + if (!ret && strong && !has_strong_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_ACQUIRE, "BR_ACQUIRE"); + if (!ret && !strong && has_strong_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_RELEASE, "BR_RELEASE"); + if (!ret && !weak && has_weak_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_DECREFS, "BR_DECREFS"); + if (orig_ptr == ptr) + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx state unchanged\n", + proc->pid, thread->pid, + node_debug_id, + (u64)node_ptr, + (u64)node_cookie); + if (ret) + return ret; + } break; + case BINDER_WORK_DEAD_BINDER: + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { + struct binder_ref_death *death; + uint32_t cmd; + binder_uintptr_t cookie; + + death = container_of(w, struct binder_ref_death, work); + if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) + cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; + else + cmd = BR_DEAD_BINDER; + cookie = death->cookie; + + binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, + "%d:%d %s %016llx\n", + proc->pid, thread->pid, + cmd == BR_DEAD_BINDER ? 
+ "BR_DEAD_BINDER" : + "BR_CLEAR_DEATH_NOTIFICATION_DONE", + (u64)cookie); + if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { + binder_inner_proc_unlock(proc); + kfree(death); + binder_stats_deleted(BINDER_STAT_DEATH); + } else { + binder_enqueue_work_ilocked( + w, &proc->delivered_death); + binder_inner_proc_unlock(proc); + } + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(cookie, + (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + binder_stat_br(proc, thread, cmd); + if (cmd == BR_DEAD_BINDER) + goto done; /* DEAD_BINDER notifications can cause transactions */ + } break; + } + + if (!t) + continue; + + BUG_ON(t->buffer == NULL); + if (t->buffer->target_node) { + struct binder_node *target_node = t->buffer->target_node; + + tr.target.ptr = target_node->ptr; + tr.cookie = target_node->cookie; + t->saved_priority = task_nice(current); + if (t->priority < target_node->min_priority && + !(t->flags & TF_ONE_WAY)) + binder_set_nice(t->priority); + else if (!(t->flags & TF_ONE_WAY) || + t->saved_priority > target_node->min_priority) + binder_set_nice(target_node->min_priority); + cmd = BR_TRANSACTION; + } else { + tr.target.ptr = 0; + tr.cookie = 0; + cmd = BR_REPLY; + } + tr.code = t->code; + tr.flags = t->flags; + tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); + + t_from = binder_get_txn_from(t); + if (t_from) { + struct task_struct *sender = t_from->proc->tsk; + + tr.sender_pid = task_tgid_nr_ns(sender, + task_active_pid_ns(current)); + } else { + tr.sender_pid = 0; + } + + tr.data_size = t->buffer->data_size; + tr.offsets_size = t->buffer->offsets_size; + tr.data.ptr.buffer = (binder_uintptr_t) + ((uintptr_t)t->buffer->data + + binder_alloc_get_user_buffer_offset(&proc->alloc)); + tr.data.ptr.offsets = tr.data.ptr.buffer + + ALIGN(t->buffer->data_size, + sizeof(void *)); + + if (put_user(cmd, (uint32_t __user *)ptr)) { + if (t_from) + binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "put_user failed", + BR_FAILED_REPLY); + + return -EFAULT; + } + ptr += sizeof(uint32_t); + if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (t_from) + binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "copy_to_user failed", + BR_FAILED_REPLY); + + return -EFAULT; + } + ptr += sizeof(tr); + + trace_binder_transaction_received(t); + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_TRANSACTION, + "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", + proc->pid, thread->pid, + (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : + "BR_REPLY", + t->debug_id, t_from ? t_from->proc->pid : 0, + t_from ? 
t_from->pid : 0, cmd, + t->buffer->data_size, t->buffer->offsets_size, + (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); + + if (t_from) + binder_thread_dec_tmpref(t_from); + t->buffer->allow_user_free = 1; + if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + binder_inner_proc_lock(thread->proc); + t->to_parent = thread->transaction_stack; + t->to_thread = thread; + thread->transaction_stack = t; + binder_inner_proc_unlock(thread->proc); + } else { + binder_free_transaction(t); + } + break; + } + +done: + + *consumed = ptr - buffer; + binder_inner_proc_lock(proc); + if (proc->requested_threads == 0 && + list_empty(&thread->proc->waiting_threads) && + proc->requested_threads_started < proc->max_threads && + (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ + /*spawn a new thread if we leave this out */) { + proc->requested_threads++; + binder_inner_proc_unlock(proc); + binder_debug(BINDER_DEBUG_THREADS, + "%d:%d BR_SPAWN_LOOPER\n", + proc->pid, thread->pid); + if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) + return -EFAULT; + binder_stat_br(proc, thread, BR_SPAWN_LOOPER); + } else + binder_inner_proc_unlock(proc); + return 0; +} + +static void binder_release_work(struct binder_proc *proc, + struct list_head *list) +{ + struct binder_work *w; + + while (1) { + w = binder_dequeue_work_head(proc, list); + if (!w) + return; + + switch (w->type) { + case BINDER_WORK_TRANSACTION: { + struct binder_transaction *t; + + t = container_of(w, struct binder_transaction, work); + + binder_cleanup_transaction(t, "process died.", + BR_DEAD_REPLY); + } break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered TRANSACTION_ERROR: %u\n", + e->cmd); + } break; + case BINDER_WORK_TRANSACTION_COMPLETE: { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered TRANSACTION_COMPLETE\n"); + kfree(w); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); + } break; + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { + struct binder_ref_death *death; + + death = container_of(w, struct binder_ref_death, work); + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered death notification, %016llx\n", + (u64)death->cookie); + kfree(death); + binder_stats_deleted(BINDER_STAT_DEATH); + } break; + default: + pr_err("unexpected work type, %d, not freed\n", + w->type); + break; + } + } + +} + +static struct binder_thread *binder_get_thread_ilocked( + struct binder_proc *proc, struct binder_thread *new_thread) +{ + struct binder_thread *thread = NULL; + struct rb_node *parent = NULL; + struct rb_node **p = &proc->threads.rb_node; + + while (*p) { + parent = *p; + thread = rb_entry(parent, struct binder_thread, rb_node); + + if (current->pid < thread->pid) + p = &(*p)->rb_left; + else if (current->pid > thread->pid) + p = &(*p)->rb_right; + else + return thread; + } + if (!new_thread) + return NULL; + thread = new_thread; + binder_stats_created(BINDER_STAT_THREAD); + thread->proc = proc; + thread->pid = current->pid; + atomic_set(&thread->tmp_ref, 0); + init_waitqueue_head(&thread->wait); + INIT_LIST_HEAD(&thread->todo); + rb_link_node(&thread->rb_node, parent, p); + rb_insert_color(&thread->rb_node, &proc->threads); + thread->looper_need_return = true; + thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; + thread->return_error.cmd = BR_OK; + 
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; + thread->reply_error.cmd = BR_OK; + INIT_LIST_HEAD(&new_thread->waiting_thread_node); + return thread; +} + +static struct binder_thread *binder_get_thread(struct binder_proc *proc) +{ + struct binder_thread *thread; + struct binder_thread *new_thread; + + binder_inner_proc_lock(proc); + thread = binder_get_thread_ilocked(proc, NULL); + binder_inner_proc_unlock(proc); + if (!thread) { + new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); + if (new_thread == NULL) + return NULL; + binder_inner_proc_lock(proc); + thread = binder_get_thread_ilocked(proc, new_thread); + binder_inner_proc_unlock(proc); + if (thread != new_thread) + kfree(new_thread); + } + return thread; +} + +static void binder_free_proc(struct binder_proc *proc) +{ + BUG_ON(!list_empty(&proc->todo)); + BUG_ON(!list_empty(&proc->delivered_death)); + binder_alloc_deferred_release(&proc->alloc); + put_task_struct(proc->tsk); + binder_stats_deleted(BINDER_STAT_PROC); + kfree(proc); +} + +static void binder_free_thread(struct binder_thread *thread) +{ + BUG_ON(!list_empty(&thread->todo)); + binder_stats_deleted(BINDER_STAT_THREAD); + binder_proc_dec_tmpref(thread->proc); + kfree(thread); +} + +static int binder_thread_release(struct binder_proc *proc, + struct binder_thread *thread) +{ + struct binder_transaction *t; + struct binder_transaction *send_reply = NULL; + int active_transactions = 0; + struct binder_transaction *last_t = NULL; + + binder_inner_proc_lock(thread->proc); + /* + * take a ref on the proc so it survives + * after we remove this thread from proc->threads. + * The corresponding dec is when we actually + * free the thread in binder_free_thread() + */ + proc->tmp_ref++; + /* + * take a ref on this thread to ensure it + * survives while we are releasing it + */ + atomic_inc(&thread->tmp_ref); + rb_erase(&thread->rb_node, &proc->threads); + t = thread->transaction_stack; + if (t) { + spin_lock(&t->lock); + if (t->to_thread == thread) + send_reply = t; + } + thread->is_dead = true; + + while (t) { + last_t = t; + active_transactions++; + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "release %d:%d transaction %d %s, still active\n", + proc->pid, thread->pid, + t->debug_id, + (t->to_thread == thread) ? "in" : "out"); + + if (t->to_thread == thread) { + t->to_proc = NULL; + t->to_thread = NULL; + if (t->buffer) { + t->buffer->transaction = NULL; + t->buffer = NULL; + } + t = t->to_parent; + } else if (t->from == thread) { + t->from = NULL; + t = t->from_parent; + } else + BUG(); + spin_unlock(&last_t->lock); + if (t) + spin_lock(&t->lock); + } + + /* + * If this thread used poll, make sure we remove the waitqueue + * from any epoll data structures holding it with POLLFREE. + * waitqueue_active() is safe to use here because we're holding + * the inner lock. 
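+ * (POLLFREE lets epoll detach from this waitqueue before it goes away)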
+ */ + if ((thread->looper & BINDER_LOOPER_STATE_POLL) && + waitqueue_active(&thread->wait)) { + wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + } + + binder_inner_proc_unlock(thread->proc); + + if (send_reply) + binder_send_failed_reply(send_reply, BR_DEAD_REPLY); + binder_release_work(proc, &thread->todo); + binder_thread_dec_tmpref(thread); + return active_transactions; +} + +static unsigned int binder_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct binder_proc *proc = filp->private_data; + struct binder_thread *thread = NULL; + bool wait_for_proc_work; + + thread = binder_get_thread(proc); + + binder_inner_proc_lock(thread->proc); + thread->looper |= BINDER_LOOPER_STATE_POLL; + wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); + + binder_inner_proc_unlock(thread->proc); + + poll_wait(filp, &thread->wait, wait); + + if (binder_has_work(thread, wait_for_proc_work)) + return POLLIN; + + return 0; +} + +static int binder_ioctl_write_read(struct file *filp, + unsigned int cmd, unsigned long arg, + struct binder_thread *thread) +{ + int ret = 0; + struct binder_proc *proc = filp->private_data; + unsigned int size = _IOC_SIZE(cmd); + void __user *ubuf = (void __user *)arg; + struct binder_write_read bwr; + + if (size != sizeof(struct binder_write_read)) { + ret = -EINVAL; + goto out; + } + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { + ret = -EFAULT; + goto out; + } + binder_debug(BINDER_DEBUG_READ_WRITE, + "%d:%d write %lld at %016llx, read %lld at %016llx\n", + proc->pid, thread->pid, + (u64)bwr.write_size, (u64)bwr.write_buffer, + (u64)bwr.read_size, (u64)bwr.read_buffer); + + if (bwr.write_size > 0) { + ret = binder_thread_write(proc, thread, + bwr.write_buffer, + bwr.write_size, + &bwr.write_consumed); + trace_binder_write_done(ret); + if (ret < 0) { + bwr.read_consumed = 0; + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) + ret = -EFAULT; + goto out; + } + } + if (bwr.read_size > 0) { + ret = binder_thread_read(proc, thread, bwr.read_buffer, + bwr.read_size, + &bwr.read_consumed, + filp->f_flags & O_NONBLOCK); + trace_binder_read_done(ret); + binder_inner_proc_lock(proc); + if (!binder_worklist_empty_ilocked(&proc->todo)) + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); + if (ret < 0) { + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) + ret = -EFAULT; + goto out; + } + } + binder_debug(BINDER_DEBUG_READ_WRITE, + "%d:%d wrote %lld of %lld, read return %lld of %lld\n", + proc->pid, thread->pid, + (u64)bwr.write_consumed, (u64)bwr.write_size, + (u64)bwr.read_consumed, (u64)bwr.read_size); + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { + ret = -EFAULT; + goto out; + } +out: + return ret; +} + +static int binder_ioctl_set_ctx_mgr(struct file *filp) +{ + int ret = 0; + struct binder_proc *proc = filp->private_data; + struct binder_context *context = proc->context; + struct binder_node *new_node; + kuid_t curr_euid = current_euid(); + + mutex_lock(&context->context_mgr_node_lock); + if (context->binder_context_mgr_node) { + pr_err("BINDER_SET_CONTEXT_MGR already set\n"); + ret = -EBUSY; + goto out; + } + ret = security_binder_set_context_mgr(proc->tsk); + if (ret < 0) + goto out; + if (uid_valid(context->binder_context_mgr_uid)) { + if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { + pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", + from_kuid(&init_user_ns, curr_euid), + from_kuid(&init_user_ns, + context->binder_context_mgr_uid)); + ret = -EPERM; + goto out; + } + } else { + context->binder_context_mgr_uid = curr_euid; + 
} + new_node = binder_new_node(proc, NULL); + if (!new_node) { + ret = -ENOMEM; + goto out; + } + binder_node_lock(new_node); + new_node->local_weak_refs++; + new_node->local_strong_refs++; + new_node->has_strong_ref = 1; + new_node->has_weak_ref = 1; + context->binder_context_mgr_node = new_node; + binder_node_unlock(new_node); + binder_put_node(new_node); +out: + mutex_unlock(&context->context_mgr_node_lock); + return ret; +} + +static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, + struct binder_node_debug_info *info) +{ + struct rb_node *n; + binder_uintptr_t ptr = info->ptr; + + memset(info, 0, sizeof(*info)); + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + struct binder_node *node = rb_entry(n, struct binder_node, + rb_node); + if (node->ptr > ptr) { + info->ptr = node->ptr; + info->cookie = node->cookie; + info->has_strong_ref = node->has_strong_ref; + info->has_weak_ref = node->has_weak_ref; + break; + } + } + binder_inner_proc_unlock(proc); + + return 0; +} + +static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + struct binder_proc *proc = filp->private_data; + struct binder_thread *thread; + unsigned int size = _IOC_SIZE(cmd); + void __user *ubuf = (void __user *)arg; + + /*pr_info("binder_ioctl: %d:%d %x %lx\n", + proc->pid, current->pid, cmd, arg);*/ + + binder_selftest_alloc(&proc->alloc); + + trace_binder_ioctl(cmd, arg); + + ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); + if (ret) + goto err_unlocked; + + thread = binder_get_thread(proc); + if (thread == NULL) { + ret = -ENOMEM; + goto err; + } + + switch (cmd) { + case BINDER_WRITE_READ: + ret = binder_ioctl_write_read(filp, cmd, arg, thread); + if (ret) + goto err; + break; + case BINDER_SET_MAX_THREADS: { + int max_threads; + + if (copy_from_user(&max_threads, ubuf, + sizeof(max_threads))) { + ret = -EINVAL; + goto err; + } + binder_inner_proc_lock(proc); + proc->max_threads = max_threads; + binder_inner_proc_unlock(proc); + break; + } + case BINDER_SET_CONTEXT_MGR: + ret = binder_ioctl_set_ctx_mgr(filp); + if (ret) + goto err; + break; + case BINDER_THREAD_EXIT: + binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", + proc->pid, thread->pid); + binder_thread_release(proc, thread); + thread = NULL; + break; + case BINDER_VERSION: { + struct binder_version __user *ver = ubuf; + + if (size != sizeof(struct binder_version)) { + ret = -EINVAL; + goto err; + } + if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, + &ver->protocol_version)) { + ret = -EINVAL; + goto err; + } + break; + } + case BINDER_GET_NODE_DEBUG_INFO: { + struct binder_node_debug_info info; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + ret = binder_ioctl_get_node_debug_info(proc, &info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; + goto err; + } + break; + } + default: + ret = -EINVAL; + goto err; + } + ret = 0; +err: + if (thread) + thread->looper_need_return = false; + wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); + if (ret && ret != -ERESTARTSYS) + pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); +err_unlocked: + trace_binder_ioctl_done(ret); + return ret; +} + +static void binder_vma_open(struct vm_area_struct *vma) +{ + struct binder_proc *proc = vma->vm_private_data; + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "%d open vm area %lx-%lx (%ld K) 
vma %lx pagep %lx\n", + proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); +} + +static void binder_vma_close(struct vm_area_struct *vma) +{ + struct binder_proc *proc = vma->vm_private_data; + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", + proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); + binder_alloc_vma_close(&proc->alloc); + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +static int binder_vm_fault(struct vm_fault *vmf) +#else +static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +#endif +{ + return VM_FAULT_SIGBUS; +} + +static const struct vm_operations_struct binder_vm_ops = { + .open = binder_vma_open, + .close = binder_vma_close, + .fault = binder_vm_fault, +}; + +static int binder_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + struct binder_proc *proc = filp->private_data; + const char *failure_string; + + if (proc->tsk != current->group_leader) + return -EINVAL; + + if ((vma->vm_end - vma->vm_start) > SZ_4M) + vma->vm_end = vma->vm_start + SZ_4M; + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", + __func__, proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); + + if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { + ret = -EPERM; + failure_string = "bad vm_flags"; + goto err_bad_arg; + } + vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + vma->vm_ops = &binder_vm_ops; + vma->vm_private_data = proc; + + ret = binder_alloc_mmap_handler(&proc->alloc, vma); + if (ret) + return ret; + mutex_lock(&proc->files_lock); + proc->files = get_files_struct(current); + mutex_unlock(&proc->files_lock); + return 0; + +err_bad_arg: + pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", + proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + return ret; +} + +static int binder_open(struct inode *nodp, struct file *filp) +{ + struct binder_proc *proc; + struct binder_device *binder_dev; + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", + current->group_leader->pid, current->pid); + + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (proc == NULL) + return -ENOMEM; + spin_lock_init(&proc->inner_lock); + spin_lock_init(&proc->outer_lock); + get_task_struct(current->group_leader); + proc->tsk = current->group_leader; + mutex_init(&proc->files_lock); + INIT_LIST_HEAD(&proc->todo); + proc->default_priority = task_nice(current); + /* binderfs stashes devices in i_private */ + if (is_binderfs_device(nodp)) + binder_dev = nodp->i_private; + else + binder_dev = container_of(filp->private_data, + struct binder_device, miscdev); + proc->context = &binder_dev->context; + binder_alloc_init(&proc->alloc); + + binder_stats_created(BINDER_STAT_PROC); + proc->pid = current->group_leader->pid; + INIT_LIST_HEAD(&proc->delivered_death); + INIT_LIST_HEAD(&proc->waiting_threads); + filp->private_data = proc; + + mutex_lock(&binder_procs_lock); + hlist_add_head(&proc->proc_node, &binder_procs); + mutex_unlock(&binder_procs_lock); + + if (binder_debugfs_dir_entry_proc) { + char strbuf[11]; + + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); + /* + * proc debug entries are shared between contexts, so + * this will fail if 
the process tries to open the driver + * again with a different context. The priting code will + * anyway print all contexts that a given PID has, so this + * is not a problem. + */ + proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, + binder_debugfs_dir_entry_proc, + (void *)(unsigned long)proc->pid, + &binder_proc_fops); + } + + return 0; +} + +static int binder_flush(struct file *filp, fl_owner_t id) +{ + struct binder_proc *proc = filp->private_data; + + binder_defer_work(proc, BINDER_DEFERRED_FLUSH); + + return 0; +} + +static void binder_deferred_flush(struct binder_proc *proc) +{ + struct rb_node *n; + int wake_count = 0; + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); + + thread->looper_need_return = true; + if (thread->looper & BINDER_LOOPER_STATE_WAITING) { + wake_up_interruptible(&thread->wait); + wake_count++; + } + } + binder_inner_proc_unlock(proc); + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "binder_flush: %d woke %d threads\n", proc->pid, + wake_count); +} + +static int binder_release(struct inode *nodp, struct file *filp) +{ + struct binder_proc *proc = filp->private_data; + + debugfs_remove(proc->debugfs_entry); + binder_defer_work(proc, BINDER_DEFERRED_RELEASE); + + return 0; +} + +static int binder_node_release(struct binder_node *node, int refs) +{ + struct binder_ref *ref; + int death = 0; + struct binder_proc *proc = node->proc; + + binder_release_work(proc, &node->async_todo); + + binder_node_lock(node); + binder_inner_proc_lock(proc); + binder_dequeue_work_ilocked(&node->work); + /* + * The caller must have taken a temporary ref on the node, + */ + BUG_ON(!node->tmp_refs); + if (hlist_empty(&node->refs) && node->tmp_refs == 1) { + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + binder_free_node(node); + + return refs; + } + + node->proc = NULL; + node->local_strong_refs = 0; + node->local_weak_refs = 0; + binder_inner_proc_unlock(proc); + + spin_lock(&binder_dead_nodes_lock); + hlist_add_head(&node->dead_node, &binder_dead_nodes); + spin_unlock(&binder_dead_nodes_lock); + + hlist_for_each_entry(ref, &node->refs, node_entry) { + refs++; + /* + * Need the node lock to synchronize + * with new notification requests and the + * inner lock to synchronize with queued + * death notifications. 
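+ * (refs that requested a death notification get BINDER_WORK_DEAD_BINDER queued below)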
+ */ + binder_inner_proc_lock(ref->proc); + if (!ref->death) { + binder_inner_proc_unlock(ref->proc); + continue; + } + + death++; + + BUG_ON(!list_empty(&ref->death->work.entry)); + ref->death->work.type = BINDER_WORK_DEAD_BINDER; + binder_enqueue_work_ilocked(&ref->death->work, + &ref->proc->todo); + binder_wakeup_proc_ilocked(ref->proc); + binder_inner_proc_unlock(ref->proc); + } + + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "node %d now dead, refs %d, death %d\n", + node->debug_id, refs, death); + binder_node_unlock(node); + binder_put_node(node); + + return refs; +} + +static void binder_deferred_release(struct binder_proc *proc) +{ + struct binder_context *context = proc->context; + struct rb_node *n; + int threads, nodes, incoming_refs, outgoing_refs, active_transactions; + + BUG_ON(proc->files); + + mutex_lock(&binder_procs_lock); + hlist_del(&proc->proc_node); + mutex_unlock(&binder_procs_lock); + + mutex_lock(&context->context_mgr_node_lock); + if (context->binder_context_mgr_node && + context->binder_context_mgr_node->proc == proc) { + binder_debug(BINDER_DEBUG_DEAD_BINDER, + "%s: %d context_mgr_node gone\n", + __func__, proc->pid); + context->binder_context_mgr_node = NULL; + } + mutex_unlock(&context->context_mgr_node_lock); + binder_inner_proc_lock(proc); + /* + * Make sure proc stays alive after we + * remove all the threads + */ + proc->tmp_ref++; + + proc->is_dead = true; + threads = 0; + active_transactions = 0; + while ((n = rb_first(&proc->threads))) { + struct binder_thread *thread; + + thread = rb_entry(n, struct binder_thread, rb_node); + binder_inner_proc_unlock(proc); + threads++; + active_transactions += binder_thread_release(proc, thread); + binder_inner_proc_lock(proc); + } + + nodes = 0; + incoming_refs = 0; + while ((n = rb_first(&proc->nodes))) { + struct binder_node *node; + + node = rb_entry(n, struct binder_node, rb_node); + nodes++; + /* + * take a temporary ref on the node before + * calling binder_node_release() which will either + * kfree() the node or call binder_put_node() + */ + binder_inc_node_tmpref_ilocked(node); + rb_erase(&node->rb_node, &proc->nodes); + binder_inner_proc_unlock(proc); + incoming_refs = binder_node_release(node, incoming_refs); + binder_inner_proc_lock(proc); + } + binder_inner_proc_unlock(proc); + + outgoing_refs = 0; + binder_proc_lock(proc); + while ((n = rb_first(&proc->refs_by_desc))) { + struct binder_ref *ref; + + ref = rb_entry(n, struct binder_ref, rb_node_desc); + outgoing_refs++; + binder_cleanup_ref_olocked(ref); + binder_proc_unlock(proc); + binder_free_ref(ref); + binder_proc_lock(proc); + } + binder_proc_unlock(proc); + + binder_release_work(proc, &proc->todo); + binder_release_work(proc, &proc->delivered_death); + + binder_debug(BINDER_DEBUG_OPEN_CLOSE, + "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", + __func__, proc->pid, threads, nodes, incoming_refs, + outgoing_refs, active_transactions); + + binder_proc_dec_tmpref(proc); +} + +static void binder_deferred_func(struct work_struct *work) +{ + struct binder_proc *proc; + struct files_struct *files; + + int defer; + + do { + mutex_lock(&binder_deferred_lock); + if (!hlist_empty(&binder_deferred_list)) { + proc = hlist_entry(binder_deferred_list.first, + struct binder_proc, deferred_work_node); + hlist_del_init(&proc->deferred_work_node); + defer = proc->deferred_work; + proc->deferred_work = 0; + } else { + proc = NULL; + defer = 0; + } + mutex_unlock(&binder_deferred_lock); + + files = NULL; + if (defer & BINDER_DEFERRED_PUT_FILES) { 
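+ /* detach the files table under files_lock; it is put once the other deferred work has run */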
+ mutex_lock(&proc->files_lock); + files = proc->files; + if (files) + proc->files = NULL; + mutex_unlock(&proc->files_lock); + } + + if (defer & BINDER_DEFERRED_FLUSH) + binder_deferred_flush(proc); + + if (defer & BINDER_DEFERRED_RELEASE) + binder_deferred_release(proc); /* frees proc */ + + if (files) + put_files_struct(files); + } while (proc); +} +static DECLARE_WORK(binder_deferred_work, binder_deferred_func); + +static void +binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) +{ + mutex_lock(&binder_deferred_lock); + proc->deferred_work |= defer; + if (hlist_unhashed(&proc->deferred_work_node)) { + hlist_add_head(&proc->deferred_work_node, + &binder_deferred_list); + schedule_work(&binder_deferred_work); + } + mutex_unlock(&binder_deferred_lock); +} + +static void print_binder_transaction_ilocked(struct seq_file *m, + struct binder_proc *proc, + const char *prefix, + struct binder_transaction *t) +{ + struct binder_proc *to_proc; + struct binder_buffer *buffer = t->buffer; + + spin_lock(&t->lock); + to_proc = t->to_proc; + seq_printf(m, + "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + prefix, t->debug_id, t, + t->from ? t->from->proc->pid : 0, + t->from ? t->from->pid : 0, + to_proc ? to_proc->pid : 0, + t->to_thread ? t->to_thread->pid : 0, + t->code, t->flags, t->priority, t->need_reply); + spin_unlock(&t->lock); + + if (proc != to_proc) { + /* + * Can only safely deref buffer if we are holding the + * correct proc inner lock for this node + */ + seq_puts(m, "\n"); + return; + } + + if (buffer == NULL) { + seq_puts(m, " buffer free\n"); + return; + } + if (buffer->target_node) + seq_printf(m, " node %d", buffer->target_node->debug_id); + seq_printf(m, " size %zd:%zd data %p\n", + buffer->data_size, buffer->offsets_size, + buffer->data); +} + +static void print_binder_work_ilocked(struct seq_file *m, + struct binder_proc *proc, + const char *prefix, + const char *transaction_prefix, + struct binder_work *w) +{ + struct binder_node *node; + struct binder_transaction *t; + + switch (w->type) { + case BINDER_WORK_TRANSACTION: + t = container_of(w, struct binder_transaction, work); + print_binder_transaction_ilocked( + m, proc, transaction_prefix, t); + break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + seq_printf(m, "%stransaction error: %u\n", + prefix, e->cmd); + } break; + case BINDER_WORK_TRANSACTION_COMPLETE: + seq_printf(m, "%stransaction complete\n", prefix); + break; + case BINDER_WORK_NODE: + node = container_of(w, struct binder_node, work); + seq_printf(m, "%snode work %d: u%016llx c%016llx\n", + prefix, node->debug_id, + (u64)node->ptr, (u64)node->cookie); + break; + case BINDER_WORK_DEAD_BINDER: + seq_printf(m, "%shas dead binder\n", prefix); + break; + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: + seq_printf(m, "%shas cleared dead binder\n", prefix); + break; + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: + seq_printf(m, "%shas cleared death notification\n", prefix); + break; + default: + seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); + break; + } +} + +static void print_binder_thread_ilocked(struct seq_file *m, + struct binder_thread *thread, + int print_always) +{ + struct binder_transaction *t; + struct binder_work *w; + size_t start_pos = m->count; + size_t header_pos; + + seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", + thread->pid, thread->looper, + thread->looper_need_return, + atomic_read(&thread->tmp_ref)); + header_pos = m->count; + 
t = thread->transaction_stack; + while (t) { + if (t->from == thread) { + print_binder_transaction_ilocked(m, thread->proc, + " outgoing transaction", t); + t = t->from_parent; + } else if (t->to_thread == thread) { + print_binder_transaction_ilocked(m, thread->proc, + " incoming transaction", t); + t = t->to_parent; + } else { + print_binder_transaction_ilocked(m, thread->proc, + " bad transaction", t); + t = NULL; + } + } + list_for_each_entry(w, &thread->todo, entry) { + print_binder_work_ilocked(m, thread->proc, " ", + " pending transaction", w); + } + if (!print_always && m->count == header_pos) + m->count = start_pos; +} + +static void print_binder_node_nilocked(struct seq_file *m, + struct binder_node *node) +{ + struct binder_ref *ref; + struct binder_work *w; + int count; + + count = 0; + hlist_for_each_entry(ref, &node->refs, node_entry) + count++; + + seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", + node->debug_id, (u64)node->ptr, (u64)node->cookie, + node->has_strong_ref, node->has_weak_ref, + node->local_strong_refs, node->local_weak_refs, + node->internal_strong_refs, count, node->tmp_refs); + if (count) { + seq_puts(m, " proc"); + hlist_for_each_entry(ref, &node->refs, node_entry) + seq_printf(m, " %d", ref->proc->pid); + } + seq_puts(m, "\n"); + if (node->proc) { + list_for_each_entry(w, &node->async_todo, entry) + print_binder_work_ilocked(m, node->proc, " ", + " pending async transaction", w); + } +} + +static void print_binder_ref_olocked(struct seq_file *m, + struct binder_ref *ref) +{ + binder_node_lock(ref->node); + seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", + ref->data.debug_id, ref->data.desc, + ref->node->proc ? "" : "dead ", + ref->node->debug_id, ref->data.strong, + ref->data.weak, ref->death); + binder_node_unlock(ref->node); +} + +static void print_binder_proc(struct seq_file *m, + struct binder_proc *proc, int print_all) +{ + struct binder_work *w; + struct rb_node *n; + size_t start_pos = m->count; + size_t header_pos; + struct binder_node *last_node = NULL; + + seq_printf(m, "proc %d\n", proc->pid); + seq_printf(m, "context %s\n", proc->context->name); + header_pos = m->count; + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) + print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, + rb_node), print_all); + + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + struct binder_node *node = rb_entry(n, struct binder_node, + rb_node); + /* + * take a temporary reference on the node so it + * survives and isn't removed from the tree + * while we print it. 
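+ * (the temporary reference is dropped with binder_put_node() once printing is done)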
+ */ + binder_inc_node_tmpref_ilocked(node); + /* Need to drop inner lock to take node lock */ + binder_inner_proc_unlock(proc); + if (last_node) + binder_put_node(last_node); + binder_node_inner_lock(node); + print_binder_node_nilocked(m, node); + binder_node_inner_unlock(node); + last_node = node; + binder_inner_proc_lock(proc); + } + binder_inner_proc_unlock(proc); + if (last_node) + binder_put_node(last_node); + + if (print_all) { + binder_proc_lock(proc); + for (n = rb_first(&proc->refs_by_desc); + n != NULL; + n = rb_next(n)) + print_binder_ref_olocked(m, rb_entry(n, + struct binder_ref, + rb_node_desc)); + binder_proc_unlock(proc); + } + binder_alloc_print_allocated(m, &proc->alloc); + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->todo, entry) + print_binder_work_ilocked(m, proc, " ", + " pending transaction", w); + list_for_each_entry(w, &proc->delivered_death, entry) { + seq_puts(m, " has delivered dead binder\n"); + break; + } + binder_inner_proc_unlock(proc); + if (!print_all && m->count == header_pos) + m->count = start_pos; +} + +static const char * const binder_return_strings[] = { + "BR_ERROR", + "BR_OK", + "BR_TRANSACTION", + "BR_REPLY", + "BR_ACQUIRE_RESULT", + "BR_DEAD_REPLY", + "BR_TRANSACTION_COMPLETE", + "BR_INCREFS", + "BR_ACQUIRE", + "BR_RELEASE", + "BR_DECREFS", + "BR_ATTEMPT_ACQUIRE", + "BR_NOOP", + "BR_SPAWN_LOOPER", + "BR_FINISHED", + "BR_DEAD_BINDER", + "BR_CLEAR_DEATH_NOTIFICATION_DONE", + "BR_FAILED_REPLY" +}; + +static const char * const binder_command_strings[] = { + "BC_TRANSACTION", + "BC_REPLY", + "BC_ACQUIRE_RESULT", + "BC_FREE_BUFFER", + "BC_INCREFS", + "BC_ACQUIRE", + "BC_RELEASE", + "BC_DECREFS", + "BC_INCREFS_DONE", + "BC_ACQUIRE_DONE", + "BC_ATTEMPT_ACQUIRE", + "BC_REGISTER_LOOPER", + "BC_ENTER_LOOPER", + "BC_EXIT_LOOPER", + "BC_REQUEST_DEATH_NOTIFICATION", + "BC_CLEAR_DEATH_NOTIFICATION", + "BC_DEAD_BINDER_DONE", + "BC_TRANSACTION_SG", + "BC_REPLY_SG", +}; + +static const char * const binder_objstat_strings[] = { + "proc", + "thread", + "node", + "ref", + "death", + "transaction", + "transaction_complete" +}; + +static void print_binder_stats(struct seq_file *m, const char *prefix, + struct binder_stats *stats) +{ + int i; + + BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != + ARRAY_SIZE(binder_command_strings)); + for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { + int temp = atomic_read(&stats->bc[i]); + + if (temp) + seq_printf(m, "%s%s: %d\n", prefix, + binder_command_strings[i], temp); + } + + BUILD_BUG_ON(ARRAY_SIZE(stats->br) != + ARRAY_SIZE(binder_return_strings)); + for (i = 0; i < ARRAY_SIZE(stats->br); i++) { + int temp = atomic_read(&stats->br[i]); + + if (temp) + seq_printf(m, "%s%s: %d\n", prefix, + binder_return_strings[i], temp); + } + + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + ARRAY_SIZE(binder_objstat_strings)); + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + ARRAY_SIZE(stats->obj_deleted)); + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { + int created = atomic_read(&stats->obj_created[i]); + int deleted = atomic_read(&stats->obj_deleted[i]); + + if (created || deleted) + seq_printf(m, "%s%s: active %d total %d\n", + prefix, + binder_objstat_strings[i], + created - deleted, + created); + } +} + +static void print_binder_proc_stats(struct seq_file *m, + struct binder_proc *proc) +{ + struct binder_work *w; + struct binder_thread *thread; + struct rb_node *n; + int count, strong, weak, ready_threads; + size_t free_async_space = + binder_alloc_get_free_async_space(&proc->alloc); + + seq_printf(m, "proc %d\n", 
proc->pid); + seq_printf(m, "context %s\n", proc->context->name); + count = 0; + ready_threads = 0; + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) + count++; + + list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) + ready_threads++; + + seq_printf(m, " threads: %d\n", count); + seq_printf(m, " requested threads: %d+%d/%d\n" + " ready threads %d\n" + " free async space %zd\n", proc->requested_threads, + proc->requested_threads_started, proc->max_threads, + ready_threads, + free_async_space); + count = 0; + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) + count++; + binder_inner_proc_unlock(proc); + seq_printf(m, " nodes: %d\n", count); + count = 0; + strong = 0; + weak = 0; + binder_proc_lock(proc); + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { + struct binder_ref *ref = rb_entry(n, struct binder_ref, + rb_node_desc); + count++; + strong += ref->data.strong; + weak += ref->data.weak; + } + binder_proc_unlock(proc); + seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); + + count = binder_alloc_get_allocated_count(&proc->alloc); + seq_printf(m, " buffers: %d\n", count); + + binder_alloc_print_pages(m, &proc->alloc); + + count = 0; + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->todo, entry) { + if (w->type == BINDER_WORK_TRANSACTION) + count++; + } + binder_inner_proc_unlock(proc); + seq_printf(m, " pending transactions: %d\n", count); + + print_binder_stats(m, " ", &proc->stats); +} + + +static int binder_state_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc; + struct binder_node *node; + struct binder_node *last_node = NULL; + + seq_puts(m, "binder state:\n"); + + spin_lock(&binder_dead_nodes_lock); + if (!hlist_empty(&binder_dead_nodes)) + seq_puts(m, "dead nodes:\n"); + hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { + /* + * take a temporary reference on the node so it + * survives and isn't removed from the list + * while we print it. 
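+ * The dead-nodes spinlock is dropped while the node is printed and
+ * re-taken afterwards; last_node keeps the previously printed node
+ * alive until it can be released outside the lock.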
+ */ + node->tmp_refs++; + spin_unlock(&binder_dead_nodes_lock); + if (last_node) + binder_put_node(last_node); + binder_node_lock(node); + print_binder_node_nilocked(m, node); + binder_node_unlock(node); + last_node = node; + spin_lock(&binder_dead_nodes_lock); + } + spin_unlock(&binder_dead_nodes_lock); + if (last_node) + binder_put_node(last_node); + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc(m, proc, 1); + mutex_unlock(&binder_procs_lock); + + return 0; +} + +static int binder_stats_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc; + + seq_puts(m, "binder stats:\n"); + + print_binder_stats(m, "", &binder_stats); + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc_stats(m, proc); + mutex_unlock(&binder_procs_lock); + + return 0; +} + +static int binder_transactions_show(struct seq_file *m, void *unused) +{ + struct binder_proc *proc; + + seq_puts(m, "binder transactions:\n"); + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc(m, proc, 0); + mutex_unlock(&binder_procs_lock); + + return 0; +} + +static int binder_proc_show(struct seq_file *m, void *unused) +{ + struct binder_proc *itr; + int pid = (unsigned long)m->private; + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(itr, &binder_procs, proc_node) { + if (itr->pid == pid) { + seq_puts(m, "binder proc state:\n"); + print_binder_proc(m, itr, 1); + } + } + mutex_unlock(&binder_procs_lock); + + return 0; +} + +static void print_binder_transaction_log_entry(struct seq_file *m, + struct binder_transaction_log_entry *e) +{ + int debug_id = READ_ONCE(e->debug_id_done); + /* + * read barrier to guarantee debug_id_done read before + * we print the log values + */ + smp_rmb(); + seq_printf(m, + "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", + e->debug_id, (e->call_type == 2) ? "reply" : + ((e->call_type == 1) ? "async" : "call "), e->from_proc, + e->from_thread, e->to_proc, e->to_thread, e->context_name, + e->to_node, e->target_handle, e->data_size, e->offsets_size, + e->return_error, e->return_error_param, + e->return_error_line); + /* + * read-barrier to guarantee read of debug_id_done after + * done printing the fields of the entry + */ + smp_rmb(); + seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? + "\n" : " (incomplete)\n"); +} + +static int binder_transaction_log_show(struct seq_file *m, void *unused) +{ + struct binder_transaction_log *log = m->private; + unsigned int log_cur = atomic_read(&log->cur); + unsigned int count; + unsigned int cur; + int i; + + count = log_cur + 1; + cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
+ 0 : count % ARRAY_SIZE(log->entry); + if (count > ARRAY_SIZE(log->entry) || log->full) + count = ARRAY_SIZE(log->entry); + for (i = 0; i < count; i++) { + unsigned int index = cur++ % ARRAY_SIZE(log->entry); + + print_binder_transaction_log_entry(m, &log->entry[index]); + } + return 0; +} + +const struct file_operations binder_fops = { + .owner = THIS_MODULE, + .poll = binder_poll, + .unlocked_ioctl = binder_ioctl, + .compat_ioctl = binder_ioctl, + .mmap = binder_mmap, + .open = binder_open, + .flush = binder_flush, + .release = binder_release, +}; + +EXPORT_SYMBOL(binder_fops); + +BINDER_DEBUG_ENTRY(state); +BINDER_DEBUG_ENTRY(stats); +BINDER_DEBUG_ENTRY(transactions); +BINDER_DEBUG_ENTRY(transaction_log); + +static int __init init_binder_device(const char *name) +{ + int ret; + struct binder_device *binder_device; + + binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); + if (!binder_device) + return -ENOMEM; + + binder_device->miscdev.fops = &binder_fops; + binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; + binder_device->miscdev.name = name; + + binder_device->context.binder_context_mgr_uid = INVALID_UID; + binder_device->context.name = name; + mutex_init(&binder_device->context.context_mgr_node_lock); + + ret = misc_register(&binder_device->miscdev); + if (ret < 0) { + kfree(binder_device); + return ret; + } + + hlist_add_head(&binder_device->hlist, &binder_devices); + + return ret; +} + +static int __init binder_init(void) +{ + int ret; + char *device_name, *device_names, *device_tmp; + struct binder_device *device; + struct hlist_node *tmp; + + binder_alloc_shrinker_init(); + + atomic_set(&binder_transaction_log.cur, ~0U); + atomic_set(&binder_transaction_log_failed.cur, ~0U); + + binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); + if (binder_debugfs_dir_entry_root) + binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", + binder_debugfs_dir_entry_root); + + if (binder_debugfs_dir_entry_root) { + debugfs_create_file("state", + S_IRUGO, + binder_debugfs_dir_entry_root, + NULL, + &binder_state_fops); + debugfs_create_file("stats", + S_IRUGO, + binder_debugfs_dir_entry_root, + NULL, + &binder_stats_fops); + debugfs_create_file("transactions", + S_IRUGO, + binder_debugfs_dir_entry_root, + NULL, + &binder_transactions_fops); + debugfs_create_file("transaction_log", + S_IRUGO, + binder_debugfs_dir_entry_root, + &binder_transaction_log, + &binder_transaction_log_fops); + debugfs_create_file("failed_transaction_log", + S_IRUGO, + binder_debugfs_dir_entry_root, + &binder_transaction_log_failed, + &binder_transaction_log_fops); + } + + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. 
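+ * (strsep() below writes NUL bytes into the string it parses, so the
+ * module parameter itself must stay untouched.)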
+ */ + device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } + strcpy(device_names, binder_devices_param); + + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } + + return ret; + +err_init_binder_device_failed: + hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { + misc_deregister(&device->miscdev); + hlist_del(&device->hlist); + kfree(device); + } + + kfree(device_names); + +err_alloc_device_names_failed: + debugfs_remove_recursive(binder_debugfs_dir_entry_root); + + return ret; +} + +static void __exit binder_exit(void) +{ + struct binder_device *device; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { + misc_deregister(&device->miscdev); + kfree(device->context.name); + hlist_del(&device->hlist); + kfree(device); + } + + debugfs_remove_recursive(binder_debugfs_dir_entry_root); +} + +module_init(binder_init); +module_exit(binder_exit); + +#define CREATE_TRACE_POINTS +#include "binder_trace.h" + +MODULE_LICENSE("GPL v2"); diff --git a/binder/binder_alloc.c b/binder/binder_alloc.c new file mode 100644 index 0000000..5d27553 --- /dev/null +++ b/binder/binder_alloc.c @@ -0,0 +1,1016 @@ +/* binder_alloc.c + * + * Android IPC Subsystem + * + * Copyright (C) 2007-2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "binder_alloc.h" +#include "binder_trace.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif + +struct list_lru binder_alloc_lru; + +static DEFINE_MUTEX(binder_alloc_mmap_lock); + +enum { + BINDER_DEBUG_OPEN_CLOSE = 1U << 1, + BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, +}; +static uint32_t binder_alloc_debug_mask; + +module_param_named(debug_mask, binder_alloc_debug_mask, + uint, 0644); + +#define binder_alloc_debug(mask, x...) 
\ + do { \ + if (binder_alloc_debug_mask & mask) \ + pr_info(x); \ + } while (0) + +static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) +{ + return list_entry(buffer->entry.next, struct binder_buffer, entry); +} + +static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) +{ + return list_entry(buffer->entry.prev, struct binder_buffer, entry); +} + +static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + if (list_is_last(&buffer->entry, &alloc->buffers)) + return (u8 *)alloc->buffer + + alloc->buffer_size - (u8 *)buffer->data; + return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data; +} + +static void binder_insert_free_buffer(struct binder_alloc *alloc, + struct binder_buffer *new_buffer) +{ + struct rb_node **p = &alloc->free_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + size_t buffer_size; + size_t new_buffer_size; + + BUG_ON(!new_buffer->free); + + new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: add free buffer, size %zd, at %pK\n", + alloc->pid, new_buffer_size, new_buffer); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + if (new_buffer_size < buffer_size) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); +} + +static void binder_insert_allocated_buffer_locked( + struct binder_alloc *alloc, struct binder_buffer *new_buffer) +{ + struct rb_node **p = &alloc->allocated_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + + BUG_ON(new_buffer->free); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (new_buffer->data < buffer->data) + p = &parent->rb_left; + else if (new_buffer->data > buffer->data) + p = &parent->rb_right; + else + BUG(); + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); +} + +static struct binder_buffer *binder_alloc_prepare_to_free_locked( + struct binder_alloc *alloc, + uintptr_t user_ptr) +{ + struct rb_node *n = alloc->allocated_buffers.rb_node; + struct binder_buffer *buffer; + void *kern_ptr; + + kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (kern_ptr < buffer->data) + n = n->rb_left; + else if (kern_ptr > buffer->data) + n = n->rb_right; + else { + /* + * Guard against user threads attempting to + * free the buffer twice + */ + if (buffer->free_in_progress) { + pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", + alloc->pid, current->pid, (u64)user_ptr); + return NULL; + } + buffer->free_in_progress = 1; + return buffer; + } + } + return NULL; +} + +/** + * binder_alloc_buffer_lookup() - get buffer given user ptr + * @alloc: binder_alloc for this proc + * @user_ptr: User pointer to buffer data + * + * Validate userspace pointer to buffer data and return buffer corresponding to + * that user pointer. Search the rb tree for buffer that matches user data + * pointer. 
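+ * The user address is first converted to the kernel address of the
+ * buffer (user_ptr - user_buffer_offset) before the search, and a
+ * second free of the same buffer is rejected via free_in_progress.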
+ * + * Return: Pointer to buffer or NULL + */ +struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, + uintptr_t user_ptr) +{ + struct binder_buffer *buffer; + + mutex_lock(&alloc->mutex); + buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); + mutex_unlock(&alloc->mutex); + return buffer; +} + +static int binder_update_page_range(struct binder_alloc *alloc, int allocate, + void *start, void *end, + struct vm_area_struct *vma) +{ + void *page_addr; + unsigned long user_page_addr; + struct binder_lru_page *page; + struct mm_struct *mm = NULL; + bool need_mm = false; + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: %s pages %pK-%pK\n", alloc->pid, + allocate ? "allocate" : "free", start, end); + + if (end <= start) + return 0; + + trace_binder_update_page_range(alloc, allocate, start, end); + + if (allocate == 0) + goto free_range; + + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; + if (!page->page_ptr) { + need_mm = true; + break; + } + } + + if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) + mm = alloc->vma_vm_mm; + + if (mm) { + down_write(&mm->mmap_sem); + vma = alloc->vma; + } + + if (!vma && need_mm) { + pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); + goto err_no_vma; + } + + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + int ret; + bool on_lru; + size_t index; + + index = (page_addr - alloc->buffer) / PAGE_SIZE; + page = &alloc->pages[index]; + + if (page->page_ptr) { + trace_binder_alloc_lru_start(alloc, index); + + on_lru = list_lru_del(&binder_alloc_lru, &page->lru); + WARN_ON(!on_lru); + + trace_binder_alloc_lru_end(alloc, index); + continue; + } + + if (WARN_ON(!vma)) + goto err_page_ptr_cleared; + + trace_binder_alloc_page_start(alloc, index); + page->page_ptr = alloc_page(GFP_KERNEL | + __GFP_HIGHMEM | + __GFP_ZERO); + if (!page->page_ptr) { + pr_err("%d: binder_alloc_buf failed for page at %pK\n", + alloc->pid, page_addr); + goto err_alloc_page_failed; + } + page->alloc = alloc; + INIT_LIST_HEAD(&page->lru); + + ret = map_kernel_range_noflush((unsigned long)page_addr, + PAGE_SIZE, PAGE_KERNEL, + &page->page_ptr); + flush_cache_vmap((unsigned long)page_addr, + (unsigned long)page_addr + PAGE_SIZE); + if (ret != 1) { + pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n", + alloc->pid, page_addr); + goto err_map_kernel_failed; + } + user_page_addr = + (uintptr_t)page_addr + alloc->user_buffer_offset; + ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); + if (ret) { + pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", + alloc->pid, user_page_addr); + goto err_vm_insert_page_failed; + } + + trace_binder_alloc_page_end(alloc, index); + /* vm_insert_page does not seem to increment the refcount */ + } + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return 0; + +free_range: + for (page_addr = end - PAGE_SIZE; page_addr >= start; + page_addr -= PAGE_SIZE) { + bool ret; + size_t index; + + index = (page_addr - alloc->buffer) / PAGE_SIZE; + page = &alloc->pages[index]; + + trace_binder_free_lru_start(alloc, index); + + ret = list_lru_add(&binder_alloc_lru, &page->lru); + WARN_ON(!ret); + + trace_binder_free_lru_end(alloc, index); + continue; + +err_vm_insert_page_failed: + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); +err_map_kernel_failed: + __free_page(page->page_ptr); + page->page_ptr = NULL; +err_alloc_page_failed: 
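+ /*
+  * These error labels live inside the free loop on purpose: after
+  * undoing the page that failed, the loop keeps walking page_addr
+  * back toward start so the pages set up before the failure are
+  * returned to the binder LRU.
+  */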
+err_page_ptr_cleared: + ; + } +err_no_vma: + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return vma ? -ENOMEM : -ESRCH; +} + +struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) +{ + struct rb_node *n = alloc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t buffer_size; + struct rb_node *best_fit = NULL; + void *has_page_addr; + void *end_page_addr; + size_t size, data_offsets_size; + int ret; + + if (alloc->vma == NULL) { + pr_err("%d: binder_alloc_buf, no vma\n", + alloc->pid); + return ERR_PTR(-ESRCH); + } + + data_offsets_size = ALIGN(data_size, sizeof(void *)) + + ALIGN(offsets_size, sizeof(void *)); + + if (data_offsets_size < data_size || data_offsets_size < offsets_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid size %zd-%zd\n", + alloc->pid, data_size, offsets_size); + return ERR_PTR(-EINVAL); + } + size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); + if (size < data_offsets_size || size < extra_buffers_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid extra_buffers_size %zd\n", + alloc->pid, extra_buffers_size); + return ERR_PTR(-EINVAL); + } + if (is_async && + alloc->free_async_space < size + sizeof(struct binder_buffer)) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd failed, no async space left\n", + alloc->pid, size); + return ERR_PTR(-ENOSPC); + } + + /* Pad 0-size buffers so they get assigned unique addresses */ + size = max(size, sizeof(void *)); + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + if (size < buffer_size) { + best_fit = n; + n = n->rb_left; + } else if (size > buffer_size) + n = n->rb_right; + else { + best_fit = n; + break; + } + } + if (best_fit == NULL) { + size_t allocated_buffers = 0; + size_t largest_alloc_size = 0; + size_t total_alloc_size = 0; + size_t free_buffers = 0; + size_t largest_free_size = 0; + size_t total_free_size = 0; + + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + allocated_buffers++; + total_alloc_size += buffer_size; + if (buffer_size > largest_alloc_size) + largest_alloc_size = buffer_size; + } + for (n = rb_first(&alloc->free_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + free_buffers++; + total_free_size += buffer_size; + if (buffer_size > largest_free_size) + largest_free_size = buffer_size; + } + pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", + alloc->pid, size); + pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, largest_alloc_size, + total_free_size, free_buffers, largest_free_size); + return ERR_PTR(-ENOSPC); + } + if (n == NULL) { + buffer = rb_entry(best_fit, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + } + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", + alloc->pid, size, buffer, buffer_size); + + has_page_addr = + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); + WARN_ON(n && 
buffer_size != size); + end_page_addr = + (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); + if (end_page_addr > has_page_addr) + end_page_addr = has_page_addr; + ret = binder_update_page_range(alloc, 1, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); + if (ret) + return ERR_PTR(ret); + + if (buffer_size != size) { + struct binder_buffer *new_buffer; + + new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!new_buffer) { + pr_err("%s: %d failed to alloc new buffer struct\n", + __func__, alloc->pid); + goto err_alloc_buf_struct_failed; + } + new_buffer->data = (u8 *)buffer->data + size; + list_add(&new_buffer->entry, &buffer->entry); + new_buffer->free = 1; + binder_insert_free_buffer(alloc, new_buffer); + } + + rb_erase(best_fit, &alloc->free_buffers); + buffer->free = 0; + buffer->free_in_progress = 0; + binder_insert_allocated_buffer_locked(alloc, buffer); + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd got %pK\n", + alloc->pid, size, buffer); + buffer->data_size = data_size; + buffer->offsets_size = offsets_size; + buffer->async_transaction = is_async; + buffer->extra_buffers_size = extra_buffers_size; + if (is_async) { + alloc->free_async_space -= size + sizeof(struct binder_buffer); + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "%d: binder_alloc_buf size %zd async free %zd\n", + alloc->pid, size, alloc->free_async_space); + } + return buffer; + +err_alloc_buf_struct_failed: + binder_update_page_range(alloc, 0, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), + end_page_addr, NULL); + return ERR_PTR(-ENOMEM); +} + +/** + * binder_alloc_new_buf() - Allocate a new binder buffer + * @alloc: binder_alloc for this proc + * @data_size: size of user data buffer + * @offsets_size: user specified buffer offset + * @extra_buffers_size: size of extra space for meta-data (eg, security context) + * @is_async: buffer for async transaction + * + * Allocate a new buffer given the requested sizes. Returns + * the kernel version of the buffer pointer. 
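+ * For async transactions the requested size is also charged against
+ * the proc's free_async_space.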
The size allocated + * is the sum of the three given sizes (each rounded up to + * pointer-sized boundary) + * + * Return: The allocated buffer or %NULL if error + */ +struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) +{ + struct binder_buffer *buffer; + + mutex_lock(&alloc->mutex); + buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, + extra_buffers_size, is_async); + mutex_unlock(&alloc->mutex); + return buffer; +} + +static void *buffer_start_page(struct binder_buffer *buffer) +{ + return (void *)((uintptr_t)buffer->data & PAGE_MASK); +} + +static void *prev_buffer_end_page(struct binder_buffer *buffer) +{ + return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); +} + +static void binder_delete_free_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + struct binder_buffer *prev, *next = NULL; + bool to_free = true; + BUG_ON(alloc->buffers.next == &buffer->entry); + prev = binder_buffer_prev(buffer); + BUG_ON(!prev->free); + if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { + to_free = false; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK share page with %pK\n", + alloc->pid, buffer->data, prev->data); + } + + if (!list_is_last(&buffer->entry, &alloc->buffers)) { + next = binder_buffer_next(buffer); + if (buffer_start_page(next) == buffer_start_page(buffer)) { + to_free = false; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK share page with %pK\n", + alloc->pid, + buffer->data, + next->data); + } + } + + if (PAGE_ALIGNED(buffer->data)) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer start %pK is page aligned\n", + alloc->pid, buffer->data); + to_free = false; + } + + if (to_free) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK do not share page with %pK or %pK\n", + alloc->pid, buffer->data, + prev->data, next ? 
next->data : NULL); + binder_update_page_range(alloc, 0, buffer_start_page(buffer), + buffer_start_page(buffer) + PAGE_SIZE, + NULL); + } + list_del(&buffer->entry); + kfree(buffer); +} + +static void binder_free_buf_locked(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + size_t size, buffer_size; + + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + size = ALIGN(buffer->data_size, sizeof(void *)) + + ALIGN(buffer->offsets_size, sizeof(void *)) + + ALIGN(buffer->extra_buffers_size, sizeof(void *)); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_free_buf %pK size %zd buffer_size %zd\n", + alloc->pid, buffer, size, buffer_size); + + BUG_ON(buffer->free); + BUG_ON(size > buffer_size); + BUG_ON(buffer->transaction != NULL); + BUG_ON(buffer->data < alloc->buffer); + BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); + + if (buffer->async_transaction) { + alloc->free_async_space += size + sizeof(struct binder_buffer); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "%d: binder_free_buf size %zd async free %zd\n", + alloc->pid, size, alloc->free_async_space); + } + + binder_update_page_range(alloc, 0, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), + NULL); + + rb_erase(&buffer->rb_node, &alloc->allocated_buffers); + buffer->free = 1; + if (!list_is_last(&buffer->entry, &alloc->buffers)) { + struct binder_buffer *next = binder_buffer_next(buffer); + + if (next->free) { + rb_erase(&next->rb_node, &alloc->free_buffers); + binder_delete_free_buffer(alloc, next); + } + } + if (alloc->buffers.next != &buffer->entry) { + struct binder_buffer *prev = binder_buffer_prev(buffer); + + if (prev->free) { + binder_delete_free_buffer(alloc, buffer); + rb_erase(&prev->rb_node, &alloc->free_buffers); + buffer = prev; + } + } + binder_insert_free_buffer(alloc, buffer); +} + +/** + * binder_alloc_free_buf() - free a binder buffer + * @alloc: binder_alloc for this proc + * @buffer: kernel pointer to buffer + * + * Free the buffer allocated via binder_alloc_new_buffer() + */ +void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + mutex_lock(&alloc->mutex); + binder_free_buf_locked(alloc, buffer); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_mmap_handler() - map virtual address space for proc + * @alloc: alloc structure for this proc + * @vma: vma passed to mmap() + * + * Called by binder_mmap() to initialize the space specified in + * vma for allocating binder buffers + * + * Return: + * 0 = success + * -EBUSY = address space already mapped + * -ENOMEM = failed to map memory to given address space + */ +int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + int ret; + struct vm_struct *area; + const char *failure_string; + struct binder_buffer *buffer; + + mutex_lock(&binder_alloc_mmap_lock); + if (alloc->buffer) { + ret = -EBUSY; + failure_string = "already mapped"; + goto err_already_mapped; + } + + area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); + if (area == NULL) { + ret = -ENOMEM; + failure_string = "get_vm_area"; + goto err_get_vm_area_failed; + } + alloc->buffer = area->addr; + alloc->user_buffer_offset = + vma->vm_start - (uintptr_t)alloc->buffer; + mutex_unlock(&binder_alloc_mmap_lock); + +#ifdef CONFIG_CPU_CACHE_VIPT + if (cache_is_vipt_aliasing()) { + while (CACHE_COLOUR( + (vma->vm_start ^ (uint32_t)alloc->buffer))) { + pr_info("%s: %d %lx-%lx maps %pK bad alignment\n", + __func__, 
alloc->pid, vma->vm_start, + vma->vm_end, alloc->buffer); + vma->vm_start += PAGE_SIZE; + } + } +#endif + alloc->pages = kzalloc(sizeof(alloc->pages[0]) * + ((vma->vm_end - vma->vm_start) / PAGE_SIZE), + GFP_KERNEL); + if (alloc->pages == NULL) { + ret = -ENOMEM; + failure_string = "alloc page array"; + goto err_alloc_pages_failed; + } + alloc->buffer_size = vma->vm_end - vma->vm_start; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + failure_string = "alloc buffer struct"; + goto err_alloc_buf_struct_failed; + } + + buffer->data = alloc->buffer; + list_add(&buffer->entry, &alloc->buffers); + buffer->free = 1; + binder_insert_free_buffer(alloc, buffer); + alloc->free_async_space = alloc->buffer_size / 2; + barrier(); + alloc->vma = vma; + alloc->vma_vm_mm = vma->vm_mm; + mmgrab(alloc->vma_vm_mm); + + return 0; + +err_alloc_buf_struct_failed: + kfree(alloc->pages); + alloc->pages = NULL; +err_alloc_pages_failed: + mutex_lock(&binder_alloc_mmap_lock); + vfree(alloc->buffer); + alloc->buffer = NULL; +err_get_vm_area_failed: +err_already_mapped: + mutex_unlock(&binder_alloc_mmap_lock); + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + return ret; +} + + +void binder_alloc_deferred_release(struct binder_alloc *alloc) +{ + struct rb_node *n; + int buffers, page_count; + struct binder_buffer *buffer; + + BUG_ON(alloc->vma); + + buffers = 0; + mutex_lock(&alloc->mutex); + while ((n = rb_first(&alloc->allocated_buffers))) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + + /* Transaction should already have been freed */ + BUG_ON(buffer->transaction); + + binder_free_buf_locked(alloc, buffer); + buffers++; + } + + while (!list_empty(&alloc->buffers)) { + buffer = list_first_entry(&alloc->buffers, + struct binder_buffer, entry); + WARN_ON(!buffer->free); + + list_del(&buffer->entry); + WARN_ON_ONCE(!list_empty(&alloc->buffers)); + kfree(buffer); + } + + page_count = 0; + if (alloc->pages) { + int i; + + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + void *page_addr; + bool on_lru; + + if (!alloc->pages[i].page_ptr) + continue; + + on_lru = list_lru_del(&binder_alloc_lru, + &alloc->pages[i].lru); + page_addr = alloc->buffer + i * PAGE_SIZE; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%s: %d: page %d at %pK %s\n", + __func__, alloc->pid, i, page_addr, + on_lru ? "on lru" : "active"); + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); + __free_page(alloc->pages[i].page_ptr); + page_count++; + } + kfree(alloc->pages); + vfree(alloc->buffer); + } + mutex_unlock(&alloc->mutex); + if (alloc->vma_vm_mm) + mmdrop(alloc->vma_vm_mm); + + binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, + "%s: %d buffers %d, pages %d\n", + __func__, alloc->pid, buffers, page_count); +} + +static void print_binder_buffer(struct seq_file *m, const char *prefix, + struct binder_buffer *buffer) +{ + seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", + prefix, buffer->debug_id, buffer->data, + buffer->data_size, buffer->offsets_size, + buffer->extra_buffers_size, + buffer->transaction ? 
"active" : "delivered"); +} + +/** + * binder_alloc_print_allocated() - print buffer info + * @m: seq_file for output via seq_printf() + * @alloc: binder_alloc for this proc + * + * Prints information about every buffer associated with + * the binder_alloc state to the given seq_file + */ +void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc) +{ + struct rb_node *n; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + print_binder_buffer(m, " buffer", + rb_entry(n, struct binder_buffer, rb_node)); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_print_pages() - print page usage + * @m: seq_file for output via seq_printf() + * @alloc: binder_alloc for this proc + */ +void binder_alloc_print_pages(struct seq_file *m, + struct binder_alloc *alloc) +{ + struct binder_lru_page *page; + int i; + int active = 0; + int lru = 0; + int free = 0; + + mutex_lock(&alloc->mutex); + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + page = &alloc->pages[i]; + if (!page->page_ptr) + free++; + else if (list_empty(&page->lru)) + active++; + else + lru++; + } + mutex_unlock(&alloc->mutex); + seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); +} + +/** + * binder_alloc_get_allocated_count() - return count of buffers + * @alloc: binder_alloc for this proc + * + * Return: count of allocated buffers + */ +int binder_alloc_get_allocated_count(struct binder_alloc *alloc) +{ + struct rb_node *n; + int count = 0; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + count++; + mutex_unlock(&alloc->mutex); + return count; +} + + +/** + * binder_alloc_vma_close() - invalidate address space + * @alloc: binder_alloc for this proc + * + * Called from binder_vma_close() when releasing address space. + * Clears alloc->vma to prevent new incoming transactions from + * allocating more buffers. + */ +void binder_alloc_vma_close(struct binder_alloc *alloc) +{ + WRITE_ONCE(alloc->vma, NULL); +} + +/** + * binder_alloc_free_page() - shrinker callback to free pages + * @item: item to free + * @lock: lock protecting the item + * @cb_arg: callback argument + * + * Called from list_lru_walk() in binder_shrink_scan() to free + * up pages when the system is under memory pressure. 
+ */ +enum lru_status binder_alloc_free_page(struct list_head *item, + struct list_lru_one *lru, + spinlock_t *lock, + void *cb_arg) +{ + struct mm_struct *mm = NULL; + struct binder_lru_page *page = container_of(item, + struct binder_lru_page, + lru); + struct binder_alloc *alloc; + uintptr_t page_addr; + size_t index; + struct vm_area_struct *vma; + + alloc = page->alloc; + if (!mutex_trylock(&alloc->mutex)) + goto err_get_alloc_mutex_failed; + + if (!page->page_ptr) + goto err_page_already_freed; + + index = page - alloc->pages; + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; + vma = alloc->vma; + if (vma) { + if (!mmget_not_zero(alloc->vma_vm_mm)) + goto err_mmget; + mm = alloc->vma_vm_mm; + if (!down_write_trylock(&mm->mmap_sem)) + goto err_down_write_mmap_sem_failed; + } + + list_lru_isolate(lru, item); + spin_unlock(lock); + + if (vma) { + trace_binder_unmap_user_start(alloc, index); + + zap_page_range(vma, + page_addr + alloc->user_buffer_offset, + PAGE_SIZE); + + trace_binder_unmap_user_end(alloc, index); + + up_write(&mm->mmap_sem); + mmput(mm); + } + + trace_binder_unmap_kernel_start(alloc, index); + + unmap_kernel_range(page_addr, PAGE_SIZE); + __free_page(page->page_ptr); + page->page_ptr = NULL; + + trace_binder_unmap_kernel_end(alloc, index); + + spin_lock(lock); + mutex_unlock(&alloc->mutex); + return LRU_REMOVED_RETRY; + +err_down_write_mmap_sem_failed: + mmput_async(mm); +err_mmget: +err_page_already_freed: + mutex_unlock(&alloc->mutex); +err_get_alloc_mutex_failed: + return LRU_SKIP; +} + +static unsigned long +binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long ret = list_lru_count(&binder_alloc_lru); + return ret; +} + +static unsigned long +binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long ret; + + ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, + NULL, sc->nr_to_scan); + return ret; +} + +struct shrinker binder_shrinker = { + .count_objects = binder_shrink_count, + .scan_objects = binder_shrink_scan, + .seeks = DEFAULT_SEEKS, +}; + +/** + * binder_alloc_init() - called by binder_open() for per-proc initialization + * @alloc: binder_alloc for this proc + * + * Called from binder_open() to initialize binder_alloc fields for + * new binder proc + */ +void binder_alloc_init(struct binder_alloc *alloc) +{ + alloc->pid = current->group_leader->pid; + mutex_init(&alloc->mutex); + INIT_LIST_HEAD(&alloc->buffers); +} + +void binder_alloc_shrinker_init(void) +{ + list_lru_init(&binder_alloc_lru); + register_shrinker(&binder_shrinker); +} diff --git a/binder/binder_alloc.h b/binder/binder_alloc.h new file mode 100644 index 0000000..2dd33b6 --- /dev/null +++ b/binder/binder_alloc.h @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry alloc->buffers
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: true if buffer is free
+ * @allow_user_free: userspace may free the buffer via BC_FREE_BUFFER
+ * @async_transaction: buffer is in use for an async transaction
+ * @free_in_progress: a userspace free of the buffer is in progress
+ * @debug_id: unique ID for debugging
+ * @transaction: transaction the buffer is in use by
+ * @target_node: binder_node the transaction is targeted at
+ * @data_size: size of the data area
+ * @offsets_size: size of the offsets array
+ * @extra_buffers_size: size of space for extra objects (e.g. sg lists)
+ * @data: pointer to the base of the buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @mutex: protects binder_alloc fields
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of binder_lru_page
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls.
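+ * The @user_buffer_offset field records the constant difference between
+ * the kernel mapping and the userspace mapping of the same pages, so
+ * addresses can be converted in either direction.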
The address space is used for both user-visible buffers and for + * struct binder_buffer objects used to track the user buffers + */ +struct binder_alloc { + struct mutex mutex; + struct vm_area_struct *vma; + struct mm_struct *vma_vm_mm; + void *buffer; + ptrdiff_t user_buffer_offset; + struct list_head buffers; + struct rb_root free_buffers; + struct rb_root allocated_buffers; + size_t free_async_space; + struct binder_lru_page *pages; + size_t buffer_size; + uint32_t buffer_free; + int pid; +}; + +#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST +void binder_selftest_alloc(struct binder_alloc *alloc); +#else +static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} +#endif +enum lru_status binder_alloc_free_page(struct list_head *item, + struct list_lru_one *lru, + spinlock_t *lock, void *cb_arg); +extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async); +extern void binder_alloc_init(struct binder_alloc *alloc); +void binder_alloc_shrinker_init(void); +extern void binder_alloc_vma_close(struct binder_alloc *alloc); +extern struct binder_buffer * +binder_alloc_prepare_to_free(struct binder_alloc *alloc, + uintptr_t user_ptr); +extern void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); +extern int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma); +extern void binder_alloc_deferred_release(struct binder_alloc *alloc); +extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc); +extern void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc); +void binder_alloc_print_pages(struct seq_file *m, + struct binder_alloc *alloc); + +/** + * binder_alloc_get_free_async_space() - get free space available for async + * @alloc: binder_alloc for this proc + * + * Return: the bytes remaining in the address-space for async transactions + */ +static inline size_t +binder_alloc_get_free_async_space(struct binder_alloc *alloc) +{ + size_t free_async_space; + + mutex_lock(&alloc->mutex); + free_async_space = alloc->free_async_space; + mutex_unlock(&alloc->mutex); + return free_async_space; +} + +/** + * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs + * @alloc: binder_alloc for this proc + * + * Return: the offset between kernel and user-space addresses to use for + * virtual address conversion + */ +static inline ptrdiff_t +binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) +{ + /* + * user_buffer_offset is constant if vma is set and + * undefined if vma is not set. It is possible to + * get here with !alloc->vma if the target process + * is dying while a transaction is being initiated. + * Returning the old value is ok in this case and + * the transaction will fail. + */ + return alloc->user_buffer_offset; +} + +#endif /* _LINUX_BINDER_ALLOC_H */ + diff --git a/binder/binder_ctl.h b/binder/binder_ctl.h new file mode 100644 index 0000000..65b2efd --- /dev/null +++ b/binder/binder_ctl.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2018 Canonical Ltd. 
+ * + */ + +#ifndef _UAPI_LINUX_BINDER_CTL_H +#define _UAPI_LINUX_BINDER_CTL_H + +#include +#include +#include + +#define BINDERFS_MAX_NAME 255 + +/** + * struct binderfs_device - retrieve information about a new binder device + * @name: the name to use for the new binderfs binder device + * @major: major number allocated for binderfs binder devices + * @minor: minor number allocated for the new binderfs binder device + * + */ +struct binderfs_device { + char name[BINDERFS_MAX_NAME + 1]; + __u8 major; + __u8 minor; +}; + +/** + * Allocate a new binder device. + */ +#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) + +#endif /* _UAPI_LINUX_BINDER_CTL_H */ + diff --git a/binder/binder_internal.h b/binder/binder_internal.h new file mode 100644 index 0000000..161c993 --- /dev/null +++ b/binder/binder_internal.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BINDER_INTERNAL_H +#define _LINUX_BINDER_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct binder_context { + struct binder_node *binder_context_mgr_node; + struct mutex context_mgr_node_lock; + kuid_t binder_context_mgr_uid; + const char *name; +}; + +/** + * struct binder_device - information about a binder device node + * @hlist: list of binder devices (only used for devices requested via + * CONFIG_ANDROID_BINDER_DEVICES) + * @miscdev: information about a binder character device node + * @context: binder context information + * @binderfs_inode: This is the inode of the root dentry of the super block + * belonging to a binderfs mount. + */ +struct binder_device { + struct hlist_node hlist; + struct miscdevice miscdev; + struct binder_context context; + struct inode *binderfs_inode; +}; + +extern const struct file_operations binder_fops; + +//#ifdef CONFIG_ANDROID_BINDERFS +extern bool is_binderfs_device(const struct inode *inode); +//#else +//static inline bool is_binderfs_device(const struct inode *inode) +//{ +// return false; +//} +//#endif + +#endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/binder/binder_trace.h b/binder/binder_trace.h new file mode 100644 index 0000000..76e3b9c --- /dev/null +++ b/binder/binder_trace.h @@ -0,0 +1,387 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM binder + +#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _BINDER_TRACE_H + +#include + +struct binder_buffer; +struct binder_node; +struct binder_proc; +struct binder_alloc; +struct binder_ref_data; +struct binder_thread; +struct binder_transaction; + +TRACE_EVENT(binder_ioctl, + TP_PROTO(unsigned int cmd, unsigned long arg), + TP_ARGS(cmd, arg), + + TP_STRUCT__entry( + __field(unsigned int, cmd) + __field(unsigned long, arg) + ), + TP_fast_assign( + __entry->cmd = cmd; + __entry->arg = arg; + ), + TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg) +); + +DECLARE_EVENT_CLASS(binder_lock_class, + TP_PROTO(const char *tag), + TP_ARGS(tag), + TP_STRUCT__entry( + __field(const char *, tag) + ), + TP_fast_assign( + __entry->tag = tag; + ), + TP_printk("tag=%s", __entry->tag) +); + +#define DEFINE_BINDER_LOCK_EVENT(name) \ +DEFINE_EVENT(binder_lock_class, name, \ + TP_PROTO(const char *func), \ + TP_ARGS(func)) + +DEFINE_BINDER_LOCK_EVENT(binder_lock); +DEFINE_BINDER_LOCK_EVENT(binder_locked); +DEFINE_BINDER_LOCK_EVENT(binder_unlock); + +DECLARE_EVENT_CLASS(binder_function_return_class, + TP_PROTO(int ret), + TP_ARGS(ret), + TP_STRUCT__entry( + __field(int, ret) + ), + TP_fast_assign( + __entry->ret = ret; + ), + TP_printk("ret=%d", __entry->ret) +); + +#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name) \ +DEFINE_EVENT(binder_function_return_class, name, \ + TP_PROTO(int ret), \ + TP_ARGS(ret)) + +DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); +DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); +DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); + +TRACE_EVENT(binder_wait_for_work, + TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), + TP_ARGS(proc_work, transaction_stack, thread_todo), + + TP_STRUCT__entry( + __field(bool, proc_work) + __field(bool, transaction_stack) + __field(bool, thread_todo) + ), + TP_fast_assign( + __entry->proc_work = proc_work; + __entry->transaction_stack = transaction_stack; + __entry->thread_todo = thread_todo; + ), + TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d", + __entry->proc_work, __entry->transaction_stack, + __entry->thread_todo) +); + +TRACE_EVENT(binder_transaction, + TP_PROTO(bool reply, struct binder_transaction *t, + struct binder_node *target_node), + TP_ARGS(reply, t, target_node), + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, target_node) + __field(int, to_proc) + __field(int, to_thread) + __field(int, reply) + __field(unsigned int, code) + __field(unsigned int, flags) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->target_node = target_node ? target_node->debug_id : 0; + __entry->to_proc = t->to_proc->pid; + __entry->to_thread = t->to_thread ? 
t->to_thread->pid : 0; + __entry->reply = reply; + __entry->code = t->code; + __entry->flags = t->flags; + ), + TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x", + __entry->debug_id, __entry->target_node, + __entry->to_proc, __entry->to_thread, + __entry->reply, __entry->flags, __entry->code) +); + +TRACE_EVENT(binder_transaction_received, + TP_PROTO(struct binder_transaction *t), + TP_ARGS(t), + + TP_STRUCT__entry( + __field(int, debug_id) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + ), + TP_printk("transaction=%d", __entry->debug_id) +); + +TRACE_EVENT(binder_transaction_node_to_ref, + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *rdata), + TP_ARGS(t, node, rdata), + + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, node_debug_id) + __field(binder_uintptr_t, node_ptr) + __field(int, ref_debug_id) + __field(uint32_t, ref_desc) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->node_debug_id = node->debug_id; + __entry->node_ptr = node->ptr; + __entry->ref_debug_id = rdata->debug_id; + __entry->ref_desc = rdata->desc; + ), + TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", + __entry->debug_id, __entry->node_debug_id, + (u64)__entry->node_ptr, + __entry->ref_debug_id, __entry->ref_desc) +); + +TRACE_EVENT(binder_transaction_ref_to_node, + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *rdata), + TP_ARGS(t, node, rdata), + + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, ref_debug_id) + __field(uint32_t, ref_desc) + __field(int, node_debug_id) + __field(binder_uintptr_t, node_ptr) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->ref_debug_id = rdata->debug_id; + __entry->ref_desc = rdata->desc; + __entry->node_debug_id = node->debug_id; + __entry->node_ptr = node->ptr; + ), + TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx", + __entry->debug_id, __entry->node_debug_id, + __entry->ref_debug_id, __entry->ref_desc, + (u64)__entry->node_ptr) +); + +TRACE_EVENT(binder_transaction_ref_to_ref, + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *src_ref, + struct binder_ref_data *dest_ref), + TP_ARGS(t, node, src_ref, dest_ref), + + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, node_debug_id) + __field(int, src_ref_debug_id) + __field(uint32_t, src_ref_desc) + __field(int, dest_ref_debug_id) + __field(uint32_t, dest_ref_desc) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->node_debug_id = node->debug_id; + __entry->src_ref_debug_id = src_ref->debug_id; + __entry->src_ref_desc = src_ref->desc; + __entry->dest_ref_debug_id = dest_ref->debug_id; + __entry->dest_ref_desc = dest_ref->desc; + ), + TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d", + __entry->debug_id, __entry->node_debug_id, + __entry->src_ref_debug_id, __entry->src_ref_desc, + __entry->dest_ref_debug_id, __entry->dest_ref_desc) +); + +TRACE_EVENT(binder_transaction_fd, + TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), + TP_ARGS(t, src_fd, dest_fd), + + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, src_fd) + __field(int, dest_fd) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->src_fd = src_fd; + __entry->dest_fd = dest_fd; + ), + TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", + __entry->debug_id, 
__entry->src_fd, __entry->dest_fd) +); + +DECLARE_EVENT_CLASS(binder_buffer_class, + TP_PROTO(struct binder_buffer *buf), + TP_ARGS(buf), + TP_STRUCT__entry( + __field(int, debug_id) + __field(size_t, data_size) + __field(size_t, offsets_size) + ), + TP_fast_assign( + __entry->debug_id = buf->debug_id; + __entry->data_size = buf->data_size; + __entry->offsets_size = buf->offsets_size; + ), + TP_printk("transaction=%d data_size=%zd offsets_size=%zd", + __entry->debug_id, __entry->data_size, __entry->offsets_size) +); + +DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, + TP_PROTO(struct binder_buffer *buffer), + TP_ARGS(buffer)); + +DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release, + TP_PROTO(struct binder_buffer *buffer), + TP_ARGS(buffer)); + +DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, + TP_PROTO(struct binder_buffer *buffer), + TP_ARGS(buffer)); + +TRACE_EVENT(binder_update_page_range, + TP_PROTO(struct binder_alloc *alloc, bool allocate, + void *start, void *end), + TP_ARGS(alloc, allocate, start, end), + TP_STRUCT__entry( + __field(int, proc) + __field(bool, allocate) + __field(size_t, offset) + __field(size_t, size) + ), + TP_fast_assign( + __entry->proc = alloc->pid; + __entry->allocate = allocate; + __entry->offset = start - alloc->buffer; + __entry->size = end - start; + ), + TP_printk("proc=%d allocate=%d offset=%zu size=%zu", + __entry->proc, __entry->allocate, + __entry->offset, __entry->size) +); + +DECLARE_EVENT_CLASS(binder_lru_page_class, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index), + TP_STRUCT__entry( + __field(int, proc) + __field(size_t, page_index) + ), + TP_fast_assign( + __entry->proc = alloc->pid; + __entry->page_index = page_index; + ), + TP_printk("proc=%d page_index=%zu", + __entry->proc, __entry->page_index) +); + +DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end, + TP_PROTO(const struct binder_alloc *alloc, size_t page_index), + TP_ARGS(alloc, page_index)); + +TRACE_EVENT(binder_command, + TP_PROTO(uint32_t cmd), + TP_ARGS(cmd), + 
TP_STRUCT__entry( + __field(uint32_t, cmd) + ), + TP_fast_assign( + __entry->cmd = cmd; + ), + TP_printk("cmd=0x%x %s", + __entry->cmd, + _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ? + binder_command_strings[_IOC_NR(__entry->cmd)] : + "unknown") +); + +TRACE_EVENT(binder_return, + TP_PROTO(uint32_t cmd), + TP_ARGS(cmd), + TP_STRUCT__entry( + __field(uint32_t, cmd) + ), + TP_fast_assign( + __entry->cmd = cmd; + ), + TP_printk("cmd=0x%x %s", + __entry->cmd, + _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ? + binder_return_strings[_IOC_NR(__entry->cmd)] : + "unknown") +); + +#endif /* _BINDER_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE binder_trace +#include diff --git a/binder/binderfs.c b/binder/binderfs.c new file mode 100644 index 0000000..f54f842 --- /dev/null +++ b/binder/binderfs.c @@ -0,0 +1,550 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "binder_ctl.h" +#include "binder_internal.h" + +#define FIRST_INODE 1 +#define SECOND_INODE 2 +#define INODE_OFFSET 3 +#define INTSTRLEN 21 +#define BINDERFS_MAX_MINOR (1U << MINORBITS) + +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 + +static struct vfsmount *binderfs_mnt; + +static dev_t binderfs_dev; +static DEFINE_MUTEX(binderfs_minors_mutex); +static DEFINE_IDA(binderfs_minors); + +/** + * binderfs_info - information about a binderfs mount + * @ipc_ns: The ipc namespace the binderfs mount belongs to. + * @control_dentry: This records the dentry of this binderfs mount + * binder-control device. + * @root_uid: uid that needs to be used when a new binder device is + * created. + * @root_gid: gid that needs to be used when a new binder device is + * created. + */ +struct binderfs_info { + struct ipc_namespace *ipc_ns; + struct dentry *control_dentry; + kuid_t root_uid; + kgid_t root_gid; + +}; + +static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) +{ + return inode->i_sb->s_fs_info; +} + +/* +bool is_binderfs_device(const struct inode *inode) +{ + if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) + return true; + + return false; +} +*/ +/** + * binderfs_binder_device_create - allocate inode from super block of a + * binderfs mount + * @ref_inode: inode from wich the super block will be taken + * @userp: buffer to copy information about new device for userspace to + * @req: struct binderfs_device as copied from userspace + * + * This function allocated a new binder_device and reserves a new minor + * number for it. + * Minor numbers are limited and tracked globally in binderfs_minors. The + * function will stash a struct binder_device for the specific binder + * device in i_private of the inode. + * It will go on to allocate a new inode from the super block of the + * filesystem mount, stash a struct binder_device in its i_private field + * and attach a dentry to that inode. 
+ * + * Return: 0 on success, negative errno on failure + */ +static int binderfs_binder_device_create(struct inode *ref_inode, + struct binderfs_device __user *userp, + struct binderfs_device *req) +{ + int minor, ret; + struct dentry *dentry, *dup, *root; + struct binder_device *device; + size_t name_len = BINDERFS_MAX_NAME + 1; + char *name = NULL; + struct inode *inode = NULL; + struct super_block *sb = ref_inode->i_sb; + struct binderfs_info *info = sb->s_fs_info; + + /* Reserve new minor number for the new device. */ + mutex_lock(&binderfs_minors_mutex); + //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); + mutex_unlock(&binderfs_minors_mutex); + if (minor < 0) + return minor; + + ret = -ENOMEM; + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + goto err; + + inode = new_inode(sb); + if (!inode) + goto err; + + inode->i_ino = minor + INODE_OFFSET; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + init_special_inode(inode, S_IFCHR | 0600, + MKDEV(MAJOR(binderfs_dev), minor)); + inode->i_fop = &binder_fops; + inode->i_uid = info->root_uid; + inode->i_gid = info->root_gid; + + name = kmalloc(name_len, GFP_KERNEL); + if (!name) + goto err; + + strscpy(name, req->name, name_len); + + device->binderfs_inode = inode; + device->context.binder_context_mgr_uid = INVALID_UID; + device->context.name = name; + device->miscdev.name = name; + device->miscdev.minor = minor; + mutex_init(&device->context.context_mgr_node_lock); + + req->major = MAJOR(binderfs_dev); + req->minor = minor; + + ret = copy_to_user(userp, req, sizeof(*req)); + if (ret) { + ret = -EFAULT; + goto err; + } + + root = sb->s_root; + inode_lock(d_inode(root)); + dentry = d_alloc_name(root, name); + if (!dentry) { + inode_unlock(d_inode(root)); + ret = -ENOMEM; + goto err; + } + + /* Verify that the name userspace gave us is not already in use. */ + dup = d_lookup(root, &dentry->d_name); + if (dup) { + if (d_really_is_positive(dup)) { + dput(dup); + dput(dentry); + inode_unlock(d_inode(root)); + ret = -EEXIST; + goto err; + } + dput(dup); + } + + inode->i_private = device; + d_add(dentry, inode); + fsnotify_create(root->d_inode, dentry); + inode_unlock(d_inode(root)); + + return 0; + +err: + kfree(name); + kfree(device); + mutex_lock(&binderfs_minors_mutex); + //ida_free(&binderfs_minors, minor); + ida_simple_remove(&binderfs_minors, minor); + mutex_unlock(&binderfs_minors_mutex); + iput(inode); + + return ret; +} + +/** + * binderfs_ctl_ioctl - handle binder device node allocation requests + * + * The request handler for the binder-control device. All requests operate on + * the binderfs mount the binder-control device resides in: + * - BINDER_CTL_ADD + * Allocate a new binder device. 
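+ * The ioctl argument is a struct binderfs_device; on success the
+ * allocated major and minor numbers are copied back to userspace.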
+ * + * Return: 0 on success, negative errno on failure + */ +static long binder_ctl_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EINVAL; + struct inode *inode = file_inode(file); + struct binderfs_device __user *device = (struct binderfs_device __user *)arg; + struct binderfs_device device_req; + + switch (cmd) { + case BINDER_CTL_ADD: + ret = copy_from_user(&device_req, device, sizeof(device_req)); + if (ret) { + ret = -EFAULT; + break; + } + + ret = binderfs_binder_device_create(inode, device, &device_req); + break; + default: + break; + } + + return ret; +} + +static void binderfs_evict_inode(struct inode *inode) +{ + struct binder_device *device = inode->i_private; + + clear_inode(inode); + + if (!device) + return; + + mutex_lock(&binderfs_minors_mutex); + //ida_free(&binderfs_minors, device->miscdev.minor); + ida_simple_remove(&binderfs_minors, device->miscdev.minor); + mutex_unlock(&binderfs_minors_mutex); + + kfree(device->context.name); + kfree(device); +} + +static const struct super_operations binderfs_super_ops = { + .statfs = simple_statfs, + .evict_inode = binderfs_evict_inode, +}; + +static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + struct inode *inode = d_inode(old_dentry); + + /* binderfs doesn't support directories. */ + if (d_is_dir(old_dentry)) + return -EPERM; + + if (flags & ~RENAME_NOREPLACE) + return -EINVAL; + + if (!simple_empty(new_dentry)) + return -ENOTEMPTY; + + if (d_really_is_positive(new_dentry)) + simple_unlink(new_dir, new_dentry); + + old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = + new_dir->i_mtime = inode->i_ctime = current_time(old_dir); + + return 0; +} + +static int binderfs_unlink(struct inode *dir, struct dentry *dentry) +{ + /* + * The control dentry is only ever touched during mount so checking it + * here should not require us to take lock. + */ + if (BINDERFS_I(dir)->control_dentry == dentry) + return -EPERM; + + return simple_unlink(dir, dentry); +} + +static const struct file_operations binder_ctl_fops = { + .owner = THIS_MODULE, + .open = nonseekable_open, + .unlocked_ioctl = binder_ctl_ioctl, + .compat_ioctl = binder_ctl_ioctl, + .llseek = noop_llseek, +}; + +/** + * binderfs_binder_ctl_create - create a new binder-control device + * @sb: super block of the binderfs mount + * + * This function creates a new binder-control device node in the binderfs mount + * referred to by @sb. + * + * Return: 0 on success, negative errno on failure + */ +static int binderfs_binder_ctl_create(struct super_block *sb) +{ + int minor, ret; + struct dentry *dentry; + struct binder_device *device; + struct inode *inode = NULL; + struct dentry *root = sb->s_root; + struct binderfs_info *info = sb->s_fs_info; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; + + inode_lock(d_inode(root)); + + /* If we have already created a binder-control node, return. */ + if (info->control_dentry) { + ret = 0; + goto out; + } + + ret = -ENOMEM; + inode = new_inode(sb); + if (!inode) + goto out; + + /* Reserve a new minor number for the new device. 
*/ + mutex_lock(&binderfs_minors_mutex); + //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); + mutex_unlock(&binderfs_minors_mutex); + if (minor < 0) { + ret = minor; + goto out; + } + + inode->i_ino = SECOND_INODE; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + init_special_inode(inode, S_IFCHR | 0600, + MKDEV(MAJOR(binderfs_dev), minor)); + inode->i_fop = &binder_ctl_fops; + inode->i_uid = info->root_uid; + inode->i_gid = info->root_gid; + + device->binderfs_inode = inode; + device->miscdev.minor = minor; + + dentry = d_alloc_name(root, "binder-control"); + if (!dentry) + goto out; + + inode->i_private = device; + info->control_dentry = dentry; + d_add(dentry, inode); + inode_unlock(d_inode(root)); + + return 0; + +out: + inode_unlock(d_inode(root)); + kfree(device); + iput(inode); + + return ret; +} + +static const struct inode_operations binderfs_dir_inode_operations = { + .lookup = simple_lookup, + .rename = binderfs_rename, + .unlink = binderfs_unlink, +}; + +static int binderfs_fill_super(struct super_block *sb, void *data, int silent) +{ + struct binderfs_info *info; + int ret = -ENOMEM; + struct inode *inode = NULL; + struct ipc_namespace *ipc_ns = sb->s_fs_info; + + get_ipc_ns(ipc_ns); + + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; + + /* + * The binderfs filesystem can be mounted by userns root in a + * non-initial userns. By default such mounts have the SB_I_NODEV flag + * set in s_iflags to prevent security issues where userns root can + * just create random device nodes via mknod() since it owns the + * filesystem mount. But binderfs does not allow to create any files + * including devices nodes. The only way to create binder devices nodes + * is through the binder-control device which userns root is explicitly + * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both + * necessary and safe. 
+ */ + sb->s_iflags &= ~SB_I_NODEV; + sb->s_iflags |= SB_I_NOEXEC; + sb->s_magic = BINDERFS_SUPER_MAGIC; + sb->s_op = &binderfs_super_ops; + sb->s_time_gran = 1; + + info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); + if (!info) + goto err_without_dentry; + + info->ipc_ns = ipc_ns; + info->root_gid = make_kgid(sb->s_user_ns, 0); + if (!gid_valid(info->root_gid)) + info->root_gid = GLOBAL_ROOT_GID; + info->root_uid = make_kuid(sb->s_user_ns, 0); + if (!uid_valid(info->root_uid)) + info->root_uid = GLOBAL_ROOT_UID; + + sb->s_fs_info = info; + + inode = new_inode(sb); + if (!inode) + goto err_without_dentry; + + inode->i_ino = FIRST_INODE; + inode->i_fop = &simple_dir_operations; + inode->i_mode = S_IFDIR | 0755; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); + inode->i_op = &binderfs_dir_inode_operations; + set_nlink(inode, 2); + + sb->s_root = d_make_root(inode); + if (!sb->s_root) + goto err_without_dentry; + + ret = binderfs_binder_ctl_create(sb); + if (ret) + goto err_with_dentry; + + return 0; + +err_with_dentry: + dput(sb->s_root); + sb->s_root = NULL; + +err_without_dentry: + put_ipc_ns(ipc_ns); + iput(inode); + kfree(info); + + return ret; +} + +static int binderfs_test_super(struct super_block *sb, void *data) +{ + struct binderfs_info *info = sb->s_fs_info; + + if (info) + return info->ipc_ns == data; + + return 0; +} + +static int binderfs_set_super(struct super_block *sb, void *data) +{ + sb->s_fs_info = data; + return set_anon_super(sb, NULL); +} + +static struct dentry *binderfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) +{ + struct super_block *sb; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + + if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super, + flags, ipc_ns->user_ns, ipc_ns); + if (IS_ERR(sb)) + return ERR_CAST(sb); + + if (!sb->s_root) { + int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0); + if (ret) { + deactivate_locked_super(sb); + return ERR_PTR(ret); + } + + sb->s_flags |= SB_ACTIVE; + } + + return dget(sb->s_root); +} + +static void binderfs_kill_super(struct super_block *sb) +{ + struct binderfs_info *info = sb->s_fs_info; + + if (info && info->ipc_ns) + put_ipc_ns(info->ipc_ns); + + kfree(info); + kill_litter_super(sb); +} + +static struct file_system_type binder_fs_type = { + .name = "binder", + .mount = binderfs_mount, + .kill_sb = binderfs_kill_super, + .fs_flags = FS_USERNS_MOUNT, +}; + +static int __init init_binderfs(void) +{ + int ret; + + /* Allocate new major number for binderfs. 
*/ + ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR, + "binder"); + if (ret) + return ret; + + ret = register_filesystem(&binder_fs_type); + if (ret) { + unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); + return ret; + } + + binderfs_mnt = kern_mount(&binder_fs_type); + if (IS_ERR(binderfs_mnt)) { + ret = PTR_ERR(binderfs_mnt); + binderfs_mnt = NULL; + unregister_filesystem(&binder_fs_type); + unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); + } + + return ret; +} + +module_init(init_binderfs); +MODULE_LICENSE("GPL v2"); diff --git a/binder/deps.c b/binder/deps.c new file mode 100644 index 0000000..b1a108c --- /dev/null +++ b/binder/deps.c @@ -0,0 +1,161 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 + +static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long) = NULL; +#else +static void (*zap_page_range_ptr)(struct vm_area_struct *, unsigned long, unsigned long, struct zap_details *) = NULL; +#endif +static int (*map_kernel_range_noflush_ptr)(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) = NULL; +static struct files_struct *(*get_files_struct_ptr)(struct task_struct *) = NULL; +static void (*put_files_struct_ptr)(struct files_struct *) = NULL; +static struct sighand_struct *(*__lock_task_sighand_ptr)(struct task_struct *, unsigned long *) = NULL; +static int (*__alloc_fd_ptr)(struct files_struct *files, unsigned start, unsigned end, unsigned flags) = NULL; +static void (*__fd_install_ptr)(struct files_struct *files, unsigned int fd, struct file *file) = NULL; +static int (*__close_fd_ptr)(struct files_struct *files, unsigned int fd) = NULL; +static int (*can_nice_ptr)(const struct task_struct *, const int) = NULL; +static int (*security_binder_set_context_mgr_ptr)(struct task_struct *mgr) = NULL; +static int (*security_binder_transaction_ptr)(struct task_struct *from, struct task_struct *to) = NULL; +static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, struct task_struct *to) = NULL; +static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL; +static void (*mmput_async_ptr)(struct mm_struct *) = NULL; +static void (*put_ipc_ns_ptr)(struct ipc_namespace *) = NULL; + +struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) +{ + if (!get_vm_area_ptr) + get_vm_area_ptr = kallsyms_lookup_name("get_vm_area"); + return get_vm_area_ptr(size, flags); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size) +#else +void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) +#endif +{ + if (!zap_page_range_ptr) + zap_page_range_ptr = kallsyms_lookup_name("zap_page_range"); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) + zap_page_range_ptr(vma, address, size); +#else + zap_page_range_ptr(vma, address, size, details); +#endif +} + +int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) +{ + if (!map_kernel_range_noflush_ptr) + map_kernel_range_noflush_ptr = kallsyms_lookup_name("map_kernel_range_noflush"); + return map_kernel_range_noflush_ptr(start, size, prot, 
pages); +} + +struct files_struct *get_files_struct(struct task_struct *task) +{ + if (!get_files_struct_ptr) + get_files_struct_ptr = kallsyms_lookup_name("get_files_struct"); + return get_files_struct_ptr(task); +} + +void put_files_struct(struct files_struct *files) +{ + if (!put_files_struct_ptr) + put_files_struct_ptr = kallsyms_lookup_name("put_files_struct"); + put_files_struct_ptr(files); +} + +struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags) +{ + if (!__lock_task_sighand_ptr) + __lock_task_sighand_ptr = kallsyms_lookup_name("__lock_task_sighand"); + return __lock_task_sighand_ptr(tsk, flags); +} + +int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags) +{ + if (!__alloc_fd_ptr) + __alloc_fd_ptr = kallsyms_lookup_name("__alloc_fd"); + return __alloc_fd_ptr(files, start, end, flags); +} + +void __fd_install(struct files_struct *files, unsigned int fd, struct file *file) +{ + if (!__fd_install_ptr) + __fd_install_ptr = kallsyms_lookup_name("__fd_install"); + __fd_install_ptr(files, fd, file); +} + +int __close_fd(struct files_struct *files, unsigned int fd) +{ + if (!__close_fd_ptr) + __close_fd_ptr = kallsyms_lookup_name("__close_fd"); + return __close_fd_ptr(files, fd); +} + +int can_nice(const struct task_struct *p, const int nice) +{ + if (!can_nice_ptr) + can_nice_ptr = kallsyms_lookup_name("can_nice"); + return can_nice_ptr(p, nice); +} + +int security_binder_set_context_mgr(struct task_struct *mgr) +{ + if (!security_binder_set_context_mgr_ptr) + security_binder_set_context_mgr_ptr = kallsyms_lookup_name("security_binder_set_context_mgr"); + return security_binder_set_context_mgr_ptr(mgr); +} + +int security_binder_transaction(struct task_struct *from, struct task_struct *to) +{ + if (!security_binder_transaction_ptr) + security_binder_transaction_ptr = kallsyms_lookup_name("security_binder_transaction"); + return security_binder_transaction_ptr(from, to); +} + +int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to) +{ + if (!security_binder_transfer_binder_ptr) + security_binder_transfer_binder_ptr = kallsyms_lookup_name("security_binder_transfer_binder"); + return security_binder_transfer_binder_ptr(from, to); +} + +int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file) +{ + if (!security_binder_transfer_file_ptr) + security_binder_transfer_file_ptr = kallsyms_lookup_name("security_binder_transfer_file"); + return security_binder_transfer_file_ptr(from, to, file); +} + +void mmput_async(struct mm_struct *mm) +{ + if (!mmput_async_ptr) + mmput_async_ptr = kallsyms_lookup_name("mmput_async"); + mmput_async_ptr(mm); +} + +bool is_binderfs_device(const struct inode *inode) +{ + if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) + return true; + + return false; +} + +void put_ipc_ns(struct ipc_namespace *ns) +{ + if (!put_ipc_ns_ptr) + put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns"); + put_ipc_ns_ptr(ns); +} diff --git a/mac80211_hwsim/Makefile b/mac80211_hwsim/Makefile new file mode 100644 index 0000000..16b33f7 --- /dev/null +++ b/mac80211_hwsim/Makefile @@ -0,0 +1,6 @@ +obj-m := mac80211_hwsim.o + +all: + $(MAKE) -C "/lib/modules/$(shell uname -r)/build" M=$(shell pwd) modules +clean: + rm -rf deps.h *.o *.o.ur-safe *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions diff --git a/mac80211_hwsim/mac80211_hwsim.c b/mac80211_hwsim/mac80211_hwsim.c new file mode 100644 index 0000000..6467ffa --- /dev/null +++ 
b/mac80211_hwsim/mac80211_hwsim.c @@ -0,0 +1,3590 @@ +/* + * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 + * Copyright (c) 2008, Jouni Malinen + * Copyright (c) 2011, Javier Lopez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * TODO: + * - Add TSF sync and fix IBSS beacon transmission by adding + * competition for "air time" at TBTT + * - RX filtering based on filter configuration (data->rx_filter) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mac80211_hwsim.h" + +#define WARN_QUEUE 100 +#define MAX_QUEUE 200 + +MODULE_AUTHOR("Jouni Malinen"); +MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); +MODULE_LICENSE("GPL"); + +static int radios = 2; +module_param(radios, int, 0444); +MODULE_PARM_DESC(radios, "Number of simulated radios"); + +static int channels = 1; +module_param(channels, int, 0444); +MODULE_PARM_DESC(channels, "Number of concurrent channels"); + +static bool paged_rx = false; +module_param(paged_rx, bool, 0644); +MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); + +static bool rctbl = false; +module_param(rctbl, bool, 0444); +MODULE_PARM_DESC(rctbl, "Handle rate control table"); + +static bool support_p2p_device = true; +module_param(support_p2p_device, bool, 0444); +MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type"); + +/** + * enum hwsim_regtest - the type of regulatory tests we offer + * + * These are the different values you can use for the regtest + * module parameter. This is useful to help test world roaming + * and the driver regulatory_hint() call and combinations of these. + * If you want to do specific alpha2 regulatory domain tests simply + * use the userspace regulatory request as that will be respected as + * well without the need of this module parameter. This is designed + * only for testing the driver regulatory request, world roaming + * and all possible combinations. + * + * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, + * this is the default value. + * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory + * hint, only one driver regulatory hint will be sent as such the + * secondary radios are expected to follow. + * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory + * request with all radios reporting the same regulatory domain. + * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling + * different regulatory domains requests. Expected behaviour is for + * an intersection to occur but each device will still use their + * respective regulatory requested domains. Subsequent radios will + * use the resulting intersection. + * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish + * this by using a custom beacon-capable regulatory domain for the first + * radio. All other device world roam. + * @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory + * domain requests. All radios will adhere to this custom world regulatory + * domain. + * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing 2 custom world regulatory + * domain requests. 
The first radio will adhere to the first custom world + * regulatory domain, the second one to the second custom world regulatory + * domain. All other devices will world roam. + * @HWSIM_REGTEST_STRICT_FOLLOW_: Used for testing strict regulatory domain + * settings, only the first radio will send a regulatory domain request + * and use strict settings. The rest of the radios are expected to follow. + * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain + * settings. All radios will adhere to this. + * @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory + * domain settings, combined with secondary driver regulatory domain + * settings. The first radio will get a strict regulatory domain setting + * using the first driver regulatory request and the second radio will use + * non-strict settings using the second driver regulatory request. All + * other devices should follow the intersection created between the + * first two. + * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need + * at least 6 radios for a complete test. We will test in this order: + * 1 - driver custom world regulatory domain + * 2 - second custom world regulatory domain + * 3 - first driver regulatory domain request + * 4 - second driver regulatory domain request + * 5 - strict regulatory domain settings using the third driver regulatory + * domain request + * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio + * regulatory requests. + */ +enum hwsim_regtest { + HWSIM_REGTEST_DISABLED = 0, + HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1, + HWSIM_REGTEST_DRIVER_REG_ALL = 2, + HWSIM_REGTEST_DIFF_COUNTRY = 3, + HWSIM_REGTEST_WORLD_ROAM = 4, + HWSIM_REGTEST_CUSTOM_WORLD = 5, + HWSIM_REGTEST_CUSTOM_WORLD_2 = 6, + HWSIM_REGTEST_STRICT_FOLLOW = 7, + HWSIM_REGTEST_STRICT_ALL = 8, + HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9, + HWSIM_REGTEST_ALL = 10, +}; + +/* Set to one of the HWSIM_REGTEST_* values above */ +static int regtest = HWSIM_REGTEST_DISABLED; +module_param(regtest, int, 0444); +MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run"); + +static const char *hwsim_alpha2s[] = { + "FI", + "AL", + "US", + "DE", + "JP", + "AL", +}; + +static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), + REG_RULE(2484-10, 2484+10, 40, 0, 20, 0), + REG_RULE(5150-10, 5240+10, 40, 0, 30, 0), + REG_RULE(5745-10, 5825+10, 40, 0, 30, 0), + } +}; + +static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = { + .n_reg_rules = 2, + .alpha2 = "99", + .reg_rules = { + REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), + REG_RULE(5725-10, 5850+10, 40, 0, 30, + NL80211_RRF_NO_IR), + } +}; + +static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = { + &hwsim_world_regdom_custom_01, + &hwsim_world_regdom_custom_02, +}; + +struct hwsim_vif_priv { + u32 magic; + u8 bssid[ETH_ALEN]; + bool assoc; + bool bcn_en; + u16 aid; +}; + +#define HWSIM_VIF_MAGIC 0x69537748 + +static inline void hwsim_check_magic(struct ieee80211_vif *vif) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + WARN(vp->magic != HWSIM_VIF_MAGIC, + "Invalid VIF (%p) magic %#x, %pM, %d/%d\n", + vif, vp->magic, vif->addr, vif->type, vif->p2p); +} + +static inline void hwsim_set_magic(struct ieee80211_vif *vif) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + vp->magic = HWSIM_VIF_MAGIC; +} + +static inline void hwsim_clear_magic(struct ieee80211_vif *vif) +{ + 
struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + vp->magic = 0; +} + +struct hwsim_sta_priv { + u32 magic; +}; + +#define HWSIM_STA_MAGIC 0x6d537749 + +static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) +{ + struct hwsim_sta_priv *sp = (void *)sta->drv_priv; + WARN_ON(sp->magic != HWSIM_STA_MAGIC); +} + +static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) +{ + struct hwsim_sta_priv *sp = (void *)sta->drv_priv; + sp->magic = HWSIM_STA_MAGIC; +} + +static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) +{ + struct hwsim_sta_priv *sp = (void *)sta->drv_priv; + sp->magic = 0; +} + +struct hwsim_chanctx_priv { + u32 magic; +}; + +#define HWSIM_CHANCTX_MAGIC 0x6d53774a + +static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c) +{ + struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; + WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC); +} + +static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c) +{ + struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; + cp->magic = HWSIM_CHANCTX_MAGIC; +} + +static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) +{ + struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; + cp->magic = 0; +} + +static unsigned int hwsim_net_id; + +static int hwsim_netgroup; + +struct hwsim_net { + int netgroup; + u32 wmediumd; +}; + +static inline int hwsim_net_get_netgroup(struct net *net) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + return hwsim_net->netgroup; +} + +static inline void hwsim_net_set_netgroup(struct net *net) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + hwsim_net->netgroup = hwsim_netgroup++; +} + +static inline u32 hwsim_net_get_wmediumd(struct net *net) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + return hwsim_net->wmediumd; +} + +static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + hwsim_net->wmediumd = portid; +} + +static struct class *hwsim_class; + +static struct net_device *hwsim_mon; /* global monitor netdev */ + +#define CHAN2G(_freq) { \ + .band = NL80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_freq), \ + .max_power = 20, \ +} + +#define CHAN5G(_freq) { \ + .band = NL80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_freq), \ + .max_power = 20, \ +} + +static const struct ieee80211_channel hwsim_channels_2ghz[] = { + CHAN2G(2412), /* Channel 1 */ + CHAN2G(2417), /* Channel 2 */ + CHAN2G(2422), /* Channel 3 */ + CHAN2G(2427), /* Channel 4 */ + CHAN2G(2432), /* Channel 5 */ + CHAN2G(2437), /* Channel 6 */ + CHAN2G(2442), /* Channel 7 */ + CHAN2G(2447), /* Channel 8 */ + CHAN2G(2452), /* Channel 9 */ + CHAN2G(2457), /* Channel 10 */ + CHAN2G(2462), /* Channel 11 */ + CHAN2G(2467), /* Channel 12 */ + CHAN2G(2472), /* Channel 13 */ + CHAN2G(2484), /* Channel 14 */ +}; + +static const struct ieee80211_channel hwsim_channels_5ghz[] = { + CHAN5G(5180), /* Channel 36 */ + CHAN5G(5200), /* Channel 40 */ + CHAN5G(5220), /* Channel 44 */ + CHAN5G(5240), /* Channel 48 */ + + CHAN5G(5260), /* Channel 52 */ + CHAN5G(5280), /* Channel 56 */ + CHAN5G(5300), /* Channel 60 */ + CHAN5G(5320), /* Channel 64 */ + + CHAN5G(5500), /* Channel 100 */ + CHAN5G(5520), /* Channel 104 */ + CHAN5G(5540), /* Channel 108 */ + CHAN5G(5560), /* Channel 112 */ + CHAN5G(5580), /* Channel 116 */ + CHAN5G(5600), /* Channel 120 */ + CHAN5G(5620), /* Channel 124 */ + CHAN5G(5640), /* Channel 
128 */ + CHAN5G(5660), /* Channel 132 */ + CHAN5G(5680), /* Channel 136 */ + CHAN5G(5700), /* Channel 140 */ + + CHAN5G(5745), /* Channel 149 */ + CHAN5G(5765), /* Channel 153 */ + CHAN5G(5785), /* Channel 157 */ + CHAN5G(5805), /* Channel 161 */ + CHAN5G(5825), /* Channel 165 */ + CHAN5G(5845), /* Channel 169 */ +}; + +static const struct ieee80211_rate hwsim_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60 }, + { .bitrate = 90 }, + { .bitrate = 120 }, + { .bitrate = 180 }, + { .bitrate = 240 }, + { .bitrate = 360 }, + { .bitrate = 480 }, + { .bitrate = 540 } +}; + +#define OUI_QCA 0x001374 +#define QCA_NL80211_SUBCMD_TEST 1 +enum qca_nl80211_vendor_subcmds { + QCA_WLAN_VENDOR_ATTR_TEST = 8, + QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST +}; + +static const struct nla_policy +hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 }, +}; + +static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct sk_buff *skb; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; + int err; + u32 val; + + err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, + hwsim_vendor_test_policy, NULL); + if (err) + return err; + if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) + return -EINVAL; + val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]); + wiphy_debug(wiphy, "%s: test=%u\n", __func__, val); + + /* Send a vendor event as a test. Note that this would not normally be + * done within a command handler, but rather, based on some other + * trigger. For simplicity, this command is used to trigger the event + * here. + * + * event_idx = 0 (index in mac80211_hwsim_vendor_commands) + */ + skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL); + if (skb) { + /* skb_put() or nla_put() will fill up data within + * NL80211_ATTR_VENDOR_DATA. 
+ */ + + /* Add vendor data */ + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); + + /* Send the event - this will call nla_nest_end() */ + cfg80211_vendor_event(skb, GFP_KERNEL); + } + + /* Send a response to the command */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10); + if (!skb) + return -ENOMEM; + + /* skb_put() or nla_put() will fill up data within + * NL80211_ATTR_VENDOR_DATA + */ + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2); + + return cfg80211_vendor_cmd_reply(skb); +} + +static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = { + { + .info = { .vendor_id = OUI_QCA, + .subcmd = QCA_NL80211_SUBCMD_TEST }, + .flags = WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = mac80211_hwsim_vendor_cmd_test, + } +}; + +/* Advertise support vendor specific events */ +static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { + { .vendor_id = OUI_QCA, .subcmd = 1 }, +}; + +static const struct ieee80211_iface_limit hwsim_if_limits[] = { + { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, + { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO) }, + /* must be last, see hwsim_if_comb */ + { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } +}; + +static const struct ieee80211_iface_combination hwsim_if_comb[] = { + { + .limits = hwsim_if_limits, + /* remove the last entry which is P2P_DEVICE */ + .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1, + .max_interfaces = 2048, + .num_different_channels = 1, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160), + }, +}; + +static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { + { + .limits = hwsim_if_limits, + .n_limits = ARRAY_SIZE(hwsim_if_limits), + .max_interfaces = 2048, + .num_different_channels = 1, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160), + }, +}; + +static spinlock_t hwsim_radio_lock; +static LIST_HEAD(hwsim_radios); +static int hwsim_radio_idx; + +static struct platform_driver mac80211_hwsim_driver = { + .driver = { + .name = "mac80211_hwsim", + }, +}; + +struct mac80211_hwsim_data { + struct list_head list; + struct ieee80211_hw *hw; + struct device *dev; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; + struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; + struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; + struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; + struct ieee80211_iface_combination if_combination; + + struct mac_address addresses[2]; + int channels, idx; + bool use_chanctx; + bool destroy_on_close; + struct work_struct destroy_work; + u32 portid; + char alpha2[2]; + const struct ieee80211_regdomain *regd; + + struct ieee80211_channel *tmp_chan; + struct ieee80211_channel *roc_chan; + u32 roc_duration; + struct delayed_work roc_start; + struct delayed_work roc_done; + struct delayed_work hw_scan; + struct cfg80211_scan_request *hw_scan_request; + struct ieee80211_vif *hw_scan_vif; + int scan_chan_idx; + u8 scan_addr[ETH_ALEN]; + struct { + struct ieee80211_channel *channel; + unsigned long next_start, start, end; + } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) + + 
ARRAY_SIZE(hwsim_channels_5ghz)]; + + struct ieee80211_channel *channel; + u64 beacon_int /* beacon interval in us */; + unsigned int rx_filter; + bool started, idle, scanning; + struct mutex mutex; + struct tasklet_hrtimer beacon_timer; + enum ps_mode { + PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL + } ps; + bool ps_poll_pending; + struct dentry *debugfs; + + uintptr_t pending_cookie; + struct sk_buff_head pending; /* packets pending */ + /* + * Only radios in the same group can communicate together (the + * channel has to match too). Each bit represents a group. A + * radio can be in more than one group. + */ + u64 group; + + /* group shared by radios created in the same netns */ + int netgroup; + /* wmediumd portid responsible for netgroup of this radio */ + u32 wmediumd; + + /* difference between this hw's clock and the real clock, in usecs */ + s64 tsf_offset; + s64 bcn_delta; + /* absolute beacon transmission time. Used to cover up "tx" delay. */ + u64 abs_bcn_ts; + + /* Stats */ + u64 tx_pkts; + u64 rx_pkts; + u64 tx_bytes; + u64 rx_bytes; + u64 tx_dropped; + u64 tx_failed; +}; + + +struct hwsim_radiotap_hdr { + struct ieee80211_radiotap_header hdr; + __le64 rt_tsft; + u8 rt_flags; + u8 rt_rate; + __le16 rt_channel; + __le16 rt_chbitmask; +} __packed; + +struct hwsim_radiotap_ack_hdr { + struct ieee80211_radiotap_header hdr; + u8 rt_flags; + u8 pad; + __le16 rt_channel; + __le16 rt_chbitmask; +} __packed; + +/* MAC80211_HWSIM netlink family */ +static struct genl_family hwsim_genl_family; + +enum hwsim_multicast_groups { + HWSIM_MCGRP_CONFIG, +}; + +static const struct genl_multicast_group hwsim_mcgrps[] = { + [HWSIM_MCGRP_CONFIG] = { .name = "config", }, +}; + +/* MAC80211_HWSIM netlink policy */ + +static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { + [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, + [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, + [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_DATA_LEN }, + [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 }, + [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 }, + [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 }, + [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC, + .len = IEEE80211_TX_MAX_RATES * + sizeof(struct hwsim_tx_rate)}, + [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 }, + [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 }, + [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, + [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 }, + [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, + [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, + [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG }, + [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG }, + [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, + [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, + [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, +}; + +static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_channel *chan); + +/* sysfs attributes */ +static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = dat; + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + struct sk_buff *skb; + struct ieee80211_pspoll *pspoll; + + if (!vp->assoc) + return; + + wiphy_debug(data->hw->wiphy, + "%s: send PS-Poll to %pM for aid %d\n", + __func__, vp->bssid, vp->aid); + + skb = dev_alloc_skb(sizeof(*pspoll)); + if (!skb) + return; + pspoll = skb_put(skb, sizeof(*pspoll)); + pspoll->frame_control = 
cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_PSPOLL | + IEEE80211_FCTL_PM); + pspoll->aid = cpu_to_le16(0xc000 | vp->aid); + memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); + memcpy(pspoll->ta, mac, ETH_ALEN); + + rcu_read_lock(); + mac80211_hwsim_tx_frame(data->hw, skb, + rcu_dereference(vif->chanctx_conf)->def.chan); + rcu_read_unlock(); +} + +static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, + struct ieee80211_vif *vif, int ps) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + + if (!vp->assoc) + return; + + wiphy_debug(data->hw->wiphy, + "%s: send data::nullfunc to %pM ps=%d\n", + __func__, vp->bssid, ps); + + skb = dev_alloc_skb(sizeof(*hdr)); + if (!skb) + return; + hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + (ps ? IEEE80211_FCTL_PM : 0)); + hdr->duration_id = cpu_to_le16(0); + memcpy(hdr->addr1, vp->bssid, ETH_ALEN); + memcpy(hdr->addr2, mac, ETH_ALEN); + memcpy(hdr->addr3, vp->bssid, ETH_ALEN); + + rcu_read_lock(); + mac80211_hwsim_tx_frame(data->hw, skb, + rcu_dereference(vif->chanctx_conf)->def.chan); + rcu_read_unlock(); +} + + +static void hwsim_send_nullfunc_ps(void *dat, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = dat; + hwsim_send_nullfunc(data, mac, vif, 1); +} + +static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = dat; + hwsim_send_nullfunc(data, mac, vif, 0); +} + +static int hwsim_fops_ps_read(void *dat, u64 *val) +{ + struct mac80211_hwsim_data *data = dat; + *val = data->ps; + return 0; +} + +static int hwsim_fops_ps_write(void *dat, u64 val) +{ + struct mac80211_hwsim_data *data = dat; + enum ps_mode old_ps; + + if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL && + val != PS_MANUAL_POLL) + return -EINVAL; + + old_ps = data->ps; + data->ps = val; + + local_bh_disable(); + if (val == PS_MANUAL_POLL) { + ieee80211_iterate_active_interfaces_atomic( + data->hw, IEEE80211_IFACE_ITER_NORMAL, + hwsim_send_ps_poll, data); + data->ps_poll_pending = true; + } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + ieee80211_iterate_active_interfaces_atomic( + data->hw, IEEE80211_IFACE_ITER_NORMAL, + hwsim_send_nullfunc_ps, data); + } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { + ieee80211_iterate_active_interfaces_atomic( + data->hw, IEEE80211_IFACE_ITER_NORMAL, + hwsim_send_nullfunc_no_ps, data); + } + local_bh_enable(); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write, + "%llu\n"); + +static int hwsim_write_simulate_radar(void *dat, u64 val) +{ + struct mac80211_hwsim_data *data = dat; + + ieee80211_radar_detected(data->hw); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL, + hwsim_write_simulate_radar, "%llu\n"); + +static int hwsim_fops_group_read(void *dat, u64 *val) +{ + struct mac80211_hwsim_data *data = dat; + *val = data->group; + return 0; +} + +static int hwsim_fops_group_write(void *dat, u64 val) +{ + struct mac80211_hwsim_data *data = dat; + data->group = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group, + hwsim_fops_group_read, hwsim_fops_group_write, + "%llx\n"); + +static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + /* TODO: allow packet injection */ + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static inline u64 
mac80211_hwsim_get_tsf_raw(void) +{ + return ktime_to_us(ktime_get_real()); +} + +static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data) +{ + u64 now = mac80211_hwsim_get_tsf_raw(); + return cpu_to_le64(now + data->tsf_offset); +} + +static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = hw->priv; + return le64_to_cpu(__mac80211_hwsim_get_tsf(data)); +} + +static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u64 tsf) +{ + struct mac80211_hwsim_data *data = hw->priv; + u64 now = mac80211_hwsim_get_tsf(hw, vif); + u32 bcn_int = data->beacon_int; + u64 delta = abs(tsf - now); + + /* adjust after beaconing with new timestamp at old TBTT */ + if (tsf > now) { + data->tsf_offset += delta; + data->bcn_delta = do_div(delta, bcn_int); + } else { + data->tsf_offset -= delta; + data->bcn_delta = -(s64)do_div(delta, bcn_int); + } +} + +static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, + struct sk_buff *tx_skb, + struct ieee80211_channel *chan) +{ + struct mac80211_hwsim_data *data = hw->priv; + struct sk_buff *skb; + struct hwsim_radiotap_hdr *hdr; + u16 flags; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); + struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); + + if (WARN_ON(!txrate)) + return; + + if (!netif_running(hwsim_mon)) + return; + + skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC); + if (skb == NULL) + return; + + hdr = skb_push(skb, sizeof(*hdr)); + hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; + hdr->hdr.it_pad = 0; + hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); + hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_CHANNEL)); + hdr->rt_tsft = __mac80211_hwsim_get_tsf(data); + hdr->rt_flags = 0; + hdr->rt_rate = txrate->bitrate / 5; + hdr->rt_channel = cpu_to_le16(chan->center_freq); + flags = IEEE80211_CHAN_2GHZ; + if (txrate->flags & IEEE80211_RATE_ERP_G) + flags |= IEEE80211_CHAN_OFDM; + else + flags |= IEEE80211_CHAN_CCK; + hdr->rt_chbitmask = cpu_to_le16(flags); + + skb->dev = hwsim_mon; + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); +} + + +static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, + const u8 *addr) +{ + struct sk_buff *skb; + struct hwsim_radiotap_ack_hdr *hdr; + u16 flags; + struct ieee80211_hdr *hdr11; + + if (!netif_running(hwsim_mon)) + return; + + skb = dev_alloc_skb(100); + if (skb == NULL) + return; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; + hdr->hdr.it_pad = 0; + hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); + hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_CHANNEL)); + hdr->rt_flags = 0; + hdr->pad = 0; + hdr->rt_channel = cpu_to_le16(chan->center_freq); + flags = IEEE80211_CHAN_2GHZ; + hdr->rt_chbitmask = cpu_to_le16(flags); + + hdr11 = skb_put(skb, 10); + hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_ACK); + hdr11->duration_id = cpu_to_le16(0); + memcpy(hdr11->addr1, addr, ETH_ALEN); + + skb->dev = hwsim_mon; + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); 
+ netif_rx(skb); +} + +struct mac80211_hwsim_addr_match_data { + u8 addr[ETH_ALEN]; + bool ret; +}; + +static void mac80211_hwsim_addr_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_addr_match_data *md = data; + + if (memcmp(mac, md->addr, ETH_ALEN) == 0) + md->ret = true; +} + +static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, + const u8 *addr) +{ + struct mac80211_hwsim_addr_match_data md = { + .ret = false, + }; + + if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0) + return true; + + memcpy(md.addr, addr, ETH_ALEN); + + ieee80211_iterate_active_interfaces_atomic(data->hw, + IEEE80211_IFACE_ITER_NORMAL, + mac80211_hwsim_addr_iter, + &md); + + return md.ret; +} + +static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, + struct sk_buff *skb) +{ + switch (data->ps) { + case PS_DISABLED: + return true; + case PS_ENABLED: + return false; + case PS_AUTO_POLL: + /* TODO: accept (some) Beacons by default and other frames only + * if pending PS-Poll has been sent */ + return true; + case PS_MANUAL_POLL: + /* Allow unicast frames to own address if there is a pending + * PS-Poll */ + if (data->ps_poll_pending && + mac80211_hwsim_addr_match(data, skb->data + 4)) { + data->ps_poll_pending = false; + return true; + } + return false; + } + + return true; +} + +static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, + struct sk_buff *skb, int portid) +{ + struct net *net; + bool found = false; + int res = -ENOENT; + + rcu_read_lock(); + for_each_net_rcu(net) { + if (data->netgroup == hwsim_net_get_netgroup(net)) { + res = genlmsg_unicast(net, skb, portid); + found = true; + break; + } + } + rcu_read_unlock(); + + if (!found) + nlmsg_free(skb); + + return res; +} + +static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, + struct sk_buff *my_skb, + int dst_portid) +{ + struct sk_buff *skb; + struct mac80211_hwsim_data *data = hw->priv; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb); + void *msg_head; + unsigned int hwsim_flags = 0; + int i; + struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES]; + uintptr_t cookie; + + if (data->ps != PS_DISABLED) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + /* If the queue contains MAX_QUEUE skb's drop some */ + if (skb_queue_len(&data->pending) >= MAX_QUEUE) { + /* Droping until WARN_QUEUE level */ + while (skb_queue_len(&data->pending) >= WARN_QUEUE) { + ieee80211_free_txskb(hw, skb_dequeue(&data->pending)); + data->tx_dropped++; + } + } + + skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (skb == NULL) + goto nla_put_failure; + + msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, + HWSIM_CMD_FRAME); + if (msg_head == NULL) { + printk(KERN_DEBUG "mac80211_hwsim: problem with msg_head\n"); + goto nla_put_failure; + } + + if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, + ETH_ALEN, data->addresses[1].addr)) + goto nla_put_failure; + + /* We get the skb->data */ + if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data)) + goto nla_put_failure; + + /* We get the flags for this transmission, and we translate them to + wmediumd flags */ + + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) + hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS; + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + hwsim_flags |= HWSIM_TX_CTL_NO_ACK; + + if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags)) + goto nla_put_failure; + + if (nla_put_u32(skb, HWSIM_ATTR_FREQ, 
data->channel->center_freq)) + goto nla_put_failure; + + /* We get the tx control (rate and retries) info*/ + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + tx_attempts[i].idx = info->status.rates[i].idx; + tx_attempts[i].count = info->status.rates[i].count; + } + + if (nla_put(skb, HWSIM_ATTR_TX_INFO, + sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, + tx_attempts)) + goto nla_put_failure; + + /* We create a cookie to identify this skb */ + data->pending_cookie++; + cookie = data->pending_cookie; + info->rate_driver_data[0] = (void *)cookie; + if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD)) + goto nla_put_failure; + + genlmsg_end(skb, msg_head); + if (hwsim_unicast_netgroup(data, skb, dst_portid)) + goto err_free_txskb; + + /* Enqueue the packet */ + skb_queue_tail(&data->pending, my_skb); + data->tx_pkts++; + data->tx_bytes += my_skb->len; + return; + +nla_put_failure: + nlmsg_free(skb); +err_free_txskb: + printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); + ieee80211_free_txskb(hw, my_skb); + data->tx_failed++; +} + +static bool hwsim_chans_compat(struct ieee80211_channel *c1, + struct ieee80211_channel *c2) +{ + if (!c1 || !c2) + return false; + + return c1->center_freq == c2->center_freq; +} + +struct tx_iter_data { + struct ieee80211_channel *channel; + bool receive; +}; + +static void mac80211_hwsim_tx_iter(void *_data, u8 *addr, + struct ieee80211_vif *vif) +{ + struct tx_iter_data *data = _data; + + if (!vif->chanctx_conf) + return; + + if (!hwsim_chans_compat(data->channel, + rcu_dereference(vif->chanctx_conf)->def.chan)) + return; + + data->receive = true; +} + +static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb) +{ + /* + * To enable this code, #define the HWSIM_RADIOTAP_OUI, + * e.g. like this: + * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00" + * (but you should use a valid OUI, not that) + * + * If anyone wants to 'donate' a radiotap OUI/subns code + * please send a patch removing this #ifdef and changing + * the values accordingly. + */ +#ifdef HWSIM_RADIOTAP_OUI + struct ieee80211_vendor_radiotap *rtap; + + /* + * Note that this code requires the headroom in the SKB + * that was allocated earlier. + */ + rtap = skb_push(skb, sizeof(*rtap) + 8 + 4); + rtap->oui[0] = HWSIM_RADIOTAP_OUI[0]; + rtap->oui[1] = HWSIM_RADIOTAP_OUI[1]; + rtap->oui[2] = HWSIM_RADIOTAP_OUI[2]; + rtap->subns = 127; + + /* + * Radiotap vendor namespaces can (and should) also be + * split into fields by using the standard radiotap + * presence bitmap mechanism. Use just BIT(0) here for + * the presence bitmap. 
+ */ + rtap->present = BIT(0); + /* We have 8 bytes of (dummy) data */ + rtap->len = 8; + /* For testing, also require it to be aligned */ + rtap->align = 8; + /* And also test that padding works, 4 bytes */ + rtap->pad = 4; + /* push the data */ + memcpy(rtap->data, "ABCDEFGH", 8); + /* make sure to clear padding, mac80211 doesn't */ + memset(rtap->data + 8, 0, 4); + + IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; +#endif +} + +static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_channel *chan) +{ + struct mac80211_hwsim_data *data = hw->priv, *data2; + bool ack = false; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_rx_status rx_status; + u64 now; + + memset(&rx_status, 0, sizeof(rx_status)); + rx_status.flag |= RX_FLAG_MACTIME_START; + rx_status.freq = chan->center_freq; + rx_status.band = chan->band; + if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { + rx_status.rate_idx = + ieee80211_rate_get_vht_mcs(&info->control.rates[0]); + rx_status.nss = + ieee80211_rate_get_vht_nss(&info->control.rates[0]); + rx_status.encoding = RX_ENC_VHT; + } else { + rx_status.rate_idx = info->control.rates[0].idx; + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) + rx_status.encoding = RX_ENC_HT; + } + if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_40; + else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_80; + else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_160; + else + rx_status.bw = RATE_INFO_BW_20; + if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; + /* TODO: simulate real signal strength (and optional packet loss) */ + rx_status.signal = -50; + if (info->control.vif) + rx_status.signal += info->control.vif->bss_conf.txpower; + + if (data->ps != PS_DISABLED) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + /* release the skb's source info */ + skb_orphan(skb); + skb_dst_drop(skb); + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); + + /* + * Get absolute mactime here so all HWs RX at the "same time", and + * absolute TX time for beacon mactime so the timestamp matches. + * Giving beacons a different mactime than non-beacons looks messy, but + * it helps the Toffset be exact and a ~10us mactime discrepancy + * probably doesn't really matter. 
+ */ + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + now = data->abs_bcn_ts; + else + now = mac80211_hwsim_get_tsf_raw(); + + /* Copy skb to all enabled radios that are on the current frequency */ + spin_lock(&hwsim_radio_lock); + list_for_each_entry(data2, &hwsim_radios, list) { + struct sk_buff *nskb; + struct tx_iter_data tx_iter_data = { + .receive = false, + .channel = chan, + }; + + if (data == data2) + continue; + + if (!data2->started || (data2->idle && !data2->tmp_chan) || + !hwsim_ps_rx_ok(data2, skb)) + continue; + + if (!(data->group & data2->group)) + continue; + + if (data->netgroup != data2->netgroup) + continue; + + if (!hwsim_chans_compat(chan, data2->tmp_chan) && + !hwsim_chans_compat(chan, data2->channel)) { + ieee80211_iterate_active_interfaces_atomic( + data2->hw, IEEE80211_IFACE_ITER_NORMAL, + mac80211_hwsim_tx_iter, &tx_iter_data); + if (!tx_iter_data.receive) + continue; + } + + /* + * reserve some space for our vendor and the normal + * radiotap header, since we're copying anyway + */ + if (skb->len < PAGE_SIZE && paged_rx) { + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) + continue; + + nskb = dev_alloc_skb(128); + if (!nskb) { + __free_page(page); + continue; + } + + memcpy(page_address(page), skb->data, skb->len); + skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len); + } else { + nskb = skb_copy(skb, GFP_ATOMIC); + if (!nskb) + continue; + } + + if (mac80211_hwsim_addr_match(data2, hdr->addr1)) + ack = true; + + rx_status.mactime = now + data2->tsf_offset; + + memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); + + mac80211_hwsim_add_vendor_rtap(nskb); + + data2->rx_pkts++; + data2->rx_bytes += nskb->len; + ieee80211_rx_irqsafe(data2->hw, nskb); + } + spin_unlock(&hwsim_radio_lock); + + return ack; +} + +static void mac80211_hwsim_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct mac80211_hwsim_data *data = hw->priv; + struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *channel; + bool ack; + u32 _portid; + + if (WARN_ON(skb->len < 10)) { + /* Should not happen; just a sanity check for addr1 use */ + ieee80211_free_txskb(hw, skb); + return; + } + + if (!data->use_chanctx) { + channel = data->channel; + } else if (txi->hw_queue == 4) { + channel = data->tmp_chan; + } else { + chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf); + if (chanctx_conf) + channel = chanctx_conf->def.chan; + else + channel = NULL; + } + + if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) { + ieee80211_free_txskb(hw, skb); + return; + } + + if (data->idle && !data->tmp_chan) { + wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); + ieee80211_free_txskb(hw, skb); + return; + } + + if (txi->control.vif) + hwsim_check_magic(txi->control.vif); + if (control->sta) + hwsim_check_sta_magic(control->sta); + + if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) + ieee80211_get_tx_rates(txi->control.vif, control->sta, skb, + txi->control.rates, + ARRAY_SIZE(txi->control.rates)); + + if (skb->len >= 24 + 8 && + ieee80211_is_probe_resp(hdr->frame_control)) { + /* fake header transmission time */ + struct ieee80211_mgmt *mgmt; + struct ieee80211_rate *txrate; + u64 ts; + + mgmt = (struct ieee80211_mgmt *)skb->data; + txrate = ieee80211_get_tx_rate(hw, txi); + ts = mac80211_hwsim_get_tsf_raw(); + 
mgmt->u.probe_resp.timestamp = + cpu_to_le64(ts + data->tsf_offset + + 24 * 8 * 10 / txrate->bitrate); + } + + mac80211_hwsim_monitor_rx(hw, skb, channel); + + /* wmediumd mode check */ + _portid = ACCESS_ONCE(data->wmediumd); + + if (_portid) + return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); + + /* NO wmediumd detected, perfect medium simulation */ + data->tx_pkts++; + data->tx_bytes += skb->len; + ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); + + if (ack && skb->len >= 16) + mac80211_hwsim_monitor_ack(channel, hdr->addr2); + + ieee80211_tx_info_clear_status(txi); + + /* frame was transmitted at most favorable rate at first attempt */ + txi->control.rates[0].count = 1; + txi->control.rates[1].idx = -1; + + if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) + txi->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(hw, skb); +} + + +static int mac80211_hwsim_start(struct ieee80211_hw *hw) +{ + struct mac80211_hwsim_data *data = hw->priv; + wiphy_debug(hw->wiphy, "%s\n", __func__); + data->started = true; + return 0; +} + + +static void mac80211_hwsim_stop(struct ieee80211_hw *hw) +{ + struct mac80211_hwsim_data *data = hw->priv; + data->started = false; + tasklet_hrtimer_cancel(&data->beacon_timer); + wiphy_debug(hw->wiphy, "%s\n", __func__); +} + + +static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", + __func__, ieee80211_vif_type_p2p(vif), + vif->addr); + hwsim_set_magic(vif); + + vif->cab_queue = 0; + vif->hw_queue[IEEE80211_AC_VO] = 0; + vif->hw_queue[IEEE80211_AC_VI] = 1; + vif->hw_queue[IEEE80211_AC_BE] = 2; + vif->hw_queue[IEEE80211_AC_BK] = 3; + + return 0; +} + + +static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, + bool newp2p) +{ + newtype = ieee80211_iftype_p2p(newtype, newp2p); + wiphy_debug(hw->wiphy, + "%s (old type=%d, new type=%d, mac_addr=%pM)\n", + __func__, ieee80211_vif_type_p2p(vif), + newtype, vif->addr); + hwsim_check_magic(vif); + + /* + * interface may change from non-AP to AP in + * which case this needs to be set up again + */ + vif->cab_queue = 0; + + return 0; +} + +static void mac80211_hwsim_remove_interface( + struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", + __func__, ieee80211_vif_type_p2p(vif), + vif->addr); + hwsim_check_magic(vif); + hwsim_clear_magic(vif); +} + +static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_channel *chan) +{ + struct mac80211_hwsim_data *data = hw->priv; + u32 _pid = ACCESS_ONCE(data->wmediumd); + + if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { + struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); + ieee80211_get_tx_rates(txi->control.vif, NULL, skb, + txi->control.rates, + ARRAY_SIZE(txi->control.rates)); + } + + mac80211_hwsim_monitor_rx(hw, skb, chan); + + if (_pid) + return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); + + mac80211_hwsim_tx_frame_no_nl(hw, skb, chan); + dev_kfree_skb(skb); +} + +static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = arg; + struct ieee80211_hw *hw = data->hw; + struct ieee80211_tx_info *info; + struct ieee80211_rate *txrate; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + + hwsim_check_magic(vif); + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_MESH_POINT && + vif->type != 
NL80211_IFTYPE_ADHOC) + return; + + skb = ieee80211_beacon_get(hw, vif); + if (skb == NULL) + return; + info = IEEE80211_SKB_CB(skb); + if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) + ieee80211_get_tx_rates(vif, NULL, skb, + info->control.rates, + ARRAY_SIZE(info->control.rates)); + + txrate = ieee80211_get_tx_rate(hw, info); + + mgmt = (struct ieee80211_mgmt *) skb->data; + /* fake header transmission time */ + data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw(); + mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts + + data->tsf_offset + + 24 * 8 * 10 / txrate->bitrate); + + mac80211_hwsim_tx_frame(hw, skb, + rcu_dereference(vif->chanctx_conf)->def.chan); + + if (vif->csa_active && ieee80211_csa_is_complete(vif)) + ieee80211_csa_finish(vif); +} + +static enum hrtimer_restart +mac80211_hwsim_beacon(struct hrtimer *timer) +{ + struct mac80211_hwsim_data *data = + container_of(timer, struct mac80211_hwsim_data, + beacon_timer.timer); + struct ieee80211_hw *hw = data->hw; + u64 bcn_int = data->beacon_int; + ktime_t next_bcn; + + if (!data->started) + goto out; + + ieee80211_iterate_active_interfaces_atomic( + hw, IEEE80211_IFACE_ITER_NORMAL, + mac80211_hwsim_beacon_tx, data); + + /* beacon at new TBTT + beacon interval */ + if (data->bcn_delta) { + bcn_int -= data->bcn_delta; + data->bcn_delta = 0; + } + + next_bcn = ktime_add(hrtimer_get_expires(timer), + ns_to_ktime(bcn_int * 1000)); + tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS); +out: + return HRTIMER_NORESTART; +} + +static const char * const hwsim_chanwidths[] = { + [NL80211_CHAN_WIDTH_20_NOHT] = "noht", + [NL80211_CHAN_WIDTH_20] = "ht20", + [NL80211_CHAN_WIDTH_40] = "ht40", + [NL80211_CHAN_WIDTH_80] = "vht80", + [NL80211_CHAN_WIDTH_80P80] = "vht80p80", + [NL80211_CHAN_WIDTH_160] = "vht160", +}; + +static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mac80211_hwsim_data *data = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { + [IEEE80211_SMPS_AUTOMATIC] = "auto", + [IEEE80211_SMPS_OFF] = "off", + [IEEE80211_SMPS_STATIC] = "static", + [IEEE80211_SMPS_DYNAMIC] = "dynamic", + }; + int idx; + + if (conf->chandef.chan) + wiphy_debug(hw->wiphy, + "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n", + __func__, + conf->chandef.chan->center_freq, + conf->chandef.center_freq1, + conf->chandef.center_freq2, + hwsim_chanwidths[conf->chandef.width], + !!(conf->flags & IEEE80211_CONF_IDLE), + !!(conf->flags & IEEE80211_CONF_PS), + smps_modes[conf->smps_mode]); + else + wiphy_debug(hw->wiphy, + "%s (freq=0 idle=%d ps=%d smps=%s)\n", + __func__, + !!(conf->flags & IEEE80211_CONF_IDLE), + !!(conf->flags & IEEE80211_CONF_PS), + smps_modes[conf->smps_mode]); + + data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); + + WARN_ON(conf->chandef.chan && data->use_chanctx); + + mutex_lock(&data->mutex); + if (data->scanning && conf->chandef.chan) { + for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { + if (data->survey_data[idx].channel == data->channel) { + data->survey_data[idx].start = + data->survey_data[idx].next_start; + data->survey_data[idx].end = jiffies; + break; + } + } + + data->channel = conf->chandef.chan; + + for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { + if (data->survey_data[idx].channel && + data->survey_data[idx].channel != data->channel) + continue; + data->survey_data[idx].channel = data->channel; + data->survey_data[idx].next_start = jiffies; + break; + } + } else { + data->channel = 
conf->chandef.chan; + } + mutex_unlock(&data->mutex); + + if (!data->started || !data->beacon_int) + tasklet_hrtimer_cancel(&data->beacon_timer); + else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { + u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); + u32 bcn_int = data->beacon_int; + u64 until_tbtt = bcn_int - do_div(tsf, bcn_int); + + tasklet_hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * 1000), + HRTIMER_MODE_REL); + } + + return 0; +} + + +static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags,u64 multicast) +{ + struct mac80211_hwsim_data *data = hw->priv; + + wiphy_debug(hw->wiphy, "%s\n", __func__); + + data->rx_filter = 0; + if (*total_flags & FIF_ALLMULTI) + data->rx_filter |= FIF_ALLMULTI; + + *total_flags = data->rx_filter; +} + +static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + unsigned int *count = data; + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + + if (vp->bcn_en) + (*count)++; +} + +static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + struct mac80211_hwsim_data *data = hw->priv; + + hwsim_check_magic(vif); + + wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n", + __func__, changed, vif->addr); + + if (changed & BSS_CHANGED_BSSID) { + wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n", + __func__, info->bssid); + memcpy(vp->bssid, info->bssid, ETH_ALEN); + } + + if (changed & BSS_CHANGED_ASSOC) { + wiphy_debug(hw->wiphy, " ASSOC: assoc=%d aid=%d\n", + info->assoc, info->aid); + vp->assoc = info->assoc; + vp->aid = info->aid; + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + wiphy_debug(hw->wiphy, " BCN EN: %d (BI=%u)\n", + info->enable_beacon, info->beacon_int); + vp->bcn_en = info->enable_beacon; + if (data->started && + !hrtimer_is_queued(&data->beacon_timer.timer) && + info->enable_beacon) { + u64 tsf, until_tbtt; + u32 bcn_int; + data->beacon_int = info->beacon_int * 1024; + tsf = mac80211_hwsim_get_tsf(hw, vif); + bcn_int = data->beacon_int; + until_tbtt = bcn_int - do_div(tsf, bcn_int); + tasklet_hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * 1000), + HRTIMER_MODE_REL); + } else if (!info->enable_beacon) { + unsigned int count = 0; + ieee80211_iterate_active_interfaces_atomic( + data->hw, IEEE80211_IFACE_ITER_NORMAL, + mac80211_hwsim_bcn_en_iter, &count); + wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", + count); + if (count == 0) { + tasklet_hrtimer_cancel(&data->beacon_timer); + data->beacon_int = 0; + } + } + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + wiphy_debug(hw->wiphy, " ERP_CTS_PROT: %d\n", + info->use_cts_prot); + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + wiphy_debug(hw->wiphy, " ERP_PREAMBLE: %d\n", + info->use_short_preamble); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + wiphy_debug(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot); + } + + if (changed & BSS_CHANGED_HT) { + wiphy_debug(hw->wiphy, " HT: op_mode=0x%x\n", + info->ht_operation_mode); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n", + (unsigned long long) info->basic_rates); + } + + if (changed & BSS_CHANGED_TXPOWER) + wiphy_debug(hw->wiphy, " TX Power: %d dBm\n", info->txpower); +} + +static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) 
+{ + hwsim_check_magic(vif); + hwsim_set_sta_magic(sta); + + return 0; +} + +static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + hwsim_check_magic(vif); + hwsim_clear_sta_magic(sta); + + return 0; +} + +static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + hwsim_check_magic(vif); + + switch (cmd) { + case STA_NOTIFY_SLEEP: + case STA_NOTIFY_AWAKE: + /* TODO: make good use of these flags */ + break; + default: + WARN(1, "Invalid sta notify: %d\n", cmd); + break; + } +} + +static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + bool set) +{ + hwsim_check_sta_magic(sta); + return 0; +} + +static int mac80211_hwsim_conf_tx( + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + wiphy_debug(hw->wiphy, + "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n", + __func__, queue, + params->txop, params->cw_min, + params->cw_max, params->aifs); + return 0; +} + +static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + + if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data)) + return -ENOENT; + + mutex_lock(&hwsim->mutex); + survey->channel = hwsim->survey_data[idx].channel; + if (!survey->channel) { + mutex_unlock(&hwsim->mutex); + return -ENOENT; + } + + /* + * Magically conjured dummy values --- this is only ok for simulated hardware. + * + * A real driver which cannot determine real values noise MUST NOT + * report any, especially not a magically conjured ones :-) + */ + survey->filled = SURVEY_INFO_NOISE_DBM | + SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY; + survey->noise = -92; + survey->time = + jiffies_to_msecs(hwsim->survey_data[idx].end - + hwsim->survey_data[idx].start); + /* report 12.5% of channel time is used */ + survey->time_busy = survey->time/8; + mutex_unlock(&hwsim->mutex); + + return 0; +} + +#ifdef CONFIG_NL80211_TESTMODE +/* + * This section contains example code for using netlink + * attributes with the testmode command in nl80211. 
+ */ + +/* These enums need to be kept in sync with userspace */ +enum hwsim_testmode_attr { + __HWSIM_TM_ATTR_INVALID = 0, + HWSIM_TM_ATTR_CMD = 1, + HWSIM_TM_ATTR_PS = 2, + + /* keep last */ + __HWSIM_TM_ATTR_AFTER_LAST, + HWSIM_TM_ATTR_MAX = __HWSIM_TM_ATTR_AFTER_LAST - 1 +}; + +enum hwsim_testmode_cmd { + HWSIM_TM_CMD_SET_PS = 0, + HWSIM_TM_CMD_GET_PS = 1, + HWSIM_TM_CMD_STOP_QUEUES = 2, + HWSIM_TM_CMD_WAKE_QUEUES = 3, +}; + +static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = { + [HWSIM_TM_ATTR_CMD] = { .type = NLA_U32 }, + [HWSIM_TM_ATTR_PS] = { .type = NLA_U32 }, +}; + +static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1]; + struct sk_buff *skb; + int err, ps; + + err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len, + hwsim_testmode_policy, NULL); + if (err) + return err; + + if (!tb[HWSIM_TM_ATTR_CMD]) + return -EINVAL; + + switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) { + case HWSIM_TM_CMD_SET_PS: + if (!tb[HWSIM_TM_ATTR_PS]) + return -EINVAL; + ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]); + return hwsim_fops_ps_write(hwsim, ps); + case HWSIM_TM_CMD_GET_PS: + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + nla_total_size(sizeof(u32))); + if (!skb) + return -ENOMEM; + if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps)) + goto nla_put_failure; + return cfg80211_testmode_reply(skb); + case HWSIM_TM_CMD_STOP_QUEUES: + ieee80211_stop_queues(hw); + return 0; + case HWSIM_TM_CMD_WAKE_QUEUES: + ieee80211_wake_queues(hw); + return 0; + default: + return -EOPNOTSUPP; + } + + nla_put_failure: + kfree_skb(skb); + return -ENOBUFS; +} +#endif + +static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + + switch (action) { + case IEEE80211_AMPDU_TX_START: + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + break; + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void mac80211_hwsim_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + /* Not implemented, queues only on kernel side */ +} + +static void hw_scan_work(struct work_struct *work) +{ + struct mac80211_hwsim_data *hwsim = + container_of(work, struct mac80211_hwsim_data, hw_scan.work); + struct cfg80211_scan_request *req = hwsim->hw_scan_request; + int dwell, i; + + mutex_lock(&hwsim->mutex); + if (hwsim->scan_chan_idx >= req->n_channels) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + + wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n"); + ieee80211_scan_completed(hwsim->hw, &info); + hwsim->hw_scan_request = NULL; + hwsim->hw_scan_vif = NULL; + hwsim->tmp_chan = NULL; + mutex_unlock(&hwsim->mutex); + return; + } + + wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n", + req->channels[hwsim->scan_chan_idx]->center_freq); + + hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx]; + if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR | + IEEE80211_CHAN_RADAR) || + !req->n_ssids) { + dwell = 
120; + } else { + dwell = 30; + /* send probes */ + for (i = 0; i < req->n_ssids; i++) { + struct sk_buff *probe; + struct ieee80211_mgmt *mgmt; + + probe = ieee80211_probereq_get(hwsim->hw, + hwsim->scan_addr, + req->ssids[i].ssid, + req->ssids[i].ssid_len, + req->ie_len); + if (!probe) + continue; + + mgmt = (struct ieee80211_mgmt *) probe->data; + memcpy(mgmt->da, req->bssid, ETH_ALEN); + memcpy(mgmt->bssid, req->bssid, ETH_ALEN); + + if (req->ie_len) + skb_put_data(probe, req->ie, req->ie_len); + + local_bh_disable(); + mac80211_hwsim_tx_frame(hwsim->hw, probe, + hwsim->tmp_chan); + local_bh_enable(); + } + } + ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, + msecs_to_jiffies(dwell)); + hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan; + hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies; + hwsim->survey_data[hwsim->scan_chan_idx].end = + jiffies + msecs_to_jiffies(dwell); + hwsim->scan_chan_idx++; + mutex_unlock(&hwsim->mutex); +} + +static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + struct cfg80211_scan_request *req = &hw_req->req; + + mutex_lock(&hwsim->mutex); + if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { + mutex_unlock(&hwsim->mutex); + return -EBUSY; + } + hwsim->hw_scan_request = req; + hwsim->hw_scan_vif = vif; + hwsim->scan_chan_idx = 0; + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + get_random_mask_addr(hwsim->scan_addr, + hw_req->req.mac_addr, + hw_req->req.mac_addr_mask); + else + memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN); + memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); + mutex_unlock(&hwsim->mutex); + + wiphy_debug(hw->wiphy, "hwsim hw_scan request\n"); + + ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0); + + return 0; +} + +static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + struct cfg80211_scan_info info = { + .aborted = true, + }; + + wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n"); + + cancel_delayed_work_sync(&hwsim->hw_scan); + + mutex_lock(&hwsim->mutex); + ieee80211_scan_completed(hwsim->hw, &info); + hwsim->tmp_chan = NULL; + hwsim->hw_scan_request = NULL; + hwsim->hw_scan_vif = NULL; + mutex_unlock(&hwsim->mutex); +} + +static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + + mutex_lock(&hwsim->mutex); + + if (hwsim->scanning) { + printk(KERN_DEBUG "two hwsim sw_scans detected!\n"); + goto out; + } + + printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n"); + + memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN); + hwsim->scanning = true; + memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); + +out: + mutex_unlock(&hwsim->mutex); +} + +static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + + mutex_lock(&hwsim->mutex); + + printk(KERN_DEBUG "hwsim sw_scan_complete\n"); + hwsim->scanning = false; + eth_zero_addr(hwsim->scan_addr); + + mutex_unlock(&hwsim->mutex); +} + +static void hw_roc_start(struct work_struct *work) +{ + struct mac80211_hwsim_data *hwsim = + container_of(work, struct mac80211_hwsim_data, roc_start.work); + + mutex_lock(&hwsim->mutex); + + wiphy_debug(hwsim->hw->wiphy, "hwsim ROC begins\n"); + hwsim->tmp_chan = hwsim->roc_chan; + 
ieee80211_ready_on_channel(hwsim->hw); + + ieee80211_queue_delayed_work(hwsim->hw, &hwsim->roc_done, + msecs_to_jiffies(hwsim->roc_duration)); + + mutex_unlock(&hwsim->mutex); +} + +static void hw_roc_done(struct work_struct *work) +{ + struct mac80211_hwsim_data *hwsim = + container_of(work, struct mac80211_hwsim_data, roc_done.work); + + mutex_lock(&hwsim->mutex); + ieee80211_remain_on_channel_expired(hwsim->hw); + hwsim->tmp_chan = NULL; + mutex_unlock(&hwsim->mutex); + + wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n"); +} + +static int mac80211_hwsim_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + + mutex_lock(&hwsim->mutex); + if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { + mutex_unlock(&hwsim->mutex); + return -EBUSY; + } + + hwsim->roc_chan = chan; + hwsim->roc_duration = duration; + mutex_unlock(&hwsim->mutex); + + wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n", + chan->center_freq, duration); + ieee80211_queue_delayed_work(hw, &hwsim->roc_start, HZ/50); + + return 0; +} + +static int mac80211_hwsim_croc(struct ieee80211_hw *hw) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + + cancel_delayed_work_sync(&hwsim->roc_start); + cancel_delayed_work_sync(&hwsim->roc_done); + + mutex_lock(&hwsim->mutex); + hwsim->tmp_chan = NULL; + mutex_unlock(&hwsim->mutex); + + wiphy_debug(hw->wiphy, "hwsim ROC canceled\n"); + + return 0; +} + +static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + hwsim_set_chanctx_magic(ctx); + wiphy_debug(hw->wiphy, + "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", + ctx->def.chan->center_freq, ctx->def.width, + ctx->def.center_freq1, ctx->def.center_freq2); + return 0; +} + +static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + wiphy_debug(hw->wiphy, + "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", + ctx->def.chan->center_freq, ctx->def.width, + ctx->def.center_freq1, ctx->def.center_freq2); + hwsim_check_chanctx_magic(ctx); + hwsim_clear_chanctx_magic(ctx); +} + +static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + hwsim_check_chanctx_magic(ctx); + wiphy_debug(hw->wiphy, + "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", + ctx->def.chan->center_freq, ctx->def.width, + ctx->def.center_freq1, ctx->def.center_freq2); +} + +static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + hwsim_check_magic(vif); + hwsim_check_chanctx_magic(ctx); + + return 0; +} + +static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + hwsim_check_magic(vif); + hwsim_check_chanctx_magic(ctx); +} + +static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = { + "tx_pkts_nic", + "tx_bytes_nic", + "rx_pkts_nic", + "rx_bytes_nic", + "d_tx_dropped", + "d_tx_failed", + "d_ps_mode", + "d_group", +}; + +#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats) + +static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, *mac80211_hwsim_gstrings_stats, + 
sizeof(mac80211_hwsim_gstrings_stats)); +} + +static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return MAC80211_HWSIM_SSTATS_LEN; + return 0; +} + +static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct mac80211_hwsim_data *ar = hw->priv; + int i = 0; + + data[i++] = ar->tx_pkts; + data[i++] = ar->tx_bytes; + data[i++] = ar->rx_pkts; + data[i++] = ar->rx_bytes; + data[i++] = ar->tx_dropped; + data[i++] = ar->tx_failed; + data[i++] = ar->ps; + data[i++] = ar->group; + + WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); +} + +#define HWSIM_COMMON_OPS \ + .tx = mac80211_hwsim_tx, \ + .start = mac80211_hwsim_start, \ + .stop = mac80211_hwsim_stop, \ + .add_interface = mac80211_hwsim_add_interface, \ + .change_interface = mac80211_hwsim_change_interface, \ + .remove_interface = mac80211_hwsim_remove_interface, \ + .config = mac80211_hwsim_config, \ + .configure_filter = mac80211_hwsim_configure_filter, \ + .bss_info_changed = mac80211_hwsim_bss_info_changed, \ + .sta_add = mac80211_hwsim_sta_add, \ + .sta_remove = mac80211_hwsim_sta_remove, \ + .sta_notify = mac80211_hwsim_sta_notify, \ + .set_tim = mac80211_hwsim_set_tim, \ + .conf_tx = mac80211_hwsim_conf_tx, \ + .get_survey = mac80211_hwsim_get_survey, \ + CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \ + .ampdu_action = mac80211_hwsim_ampdu_action, \ + .flush = mac80211_hwsim_flush, \ + .get_tsf = mac80211_hwsim_get_tsf, \ + .set_tsf = mac80211_hwsim_set_tsf, \ + .get_et_sset_count = mac80211_hwsim_get_et_sset_count, \ + .get_et_stats = mac80211_hwsim_get_et_stats, \ + .get_et_strings = mac80211_hwsim_get_et_strings, + +static const struct ieee80211_ops mac80211_hwsim_ops = { + HWSIM_COMMON_OPS + .sw_scan_start = mac80211_hwsim_sw_scan, + .sw_scan_complete = mac80211_hwsim_sw_scan_complete, +}; + +static const struct ieee80211_ops mac80211_hwsim_mchan_ops = { + HWSIM_COMMON_OPS + .hw_scan = mac80211_hwsim_hw_scan, + .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, + .sw_scan_start = NULL, + .sw_scan_complete = NULL, + .remain_on_channel = mac80211_hwsim_roc, + .cancel_remain_on_channel = mac80211_hwsim_croc, + .add_chanctx = mac80211_hwsim_add_chanctx, + .remove_chanctx = mac80211_hwsim_remove_chanctx, + .change_chanctx = mac80211_hwsim_change_chanctx, + .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx, + .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, +}; + +struct hwsim_new_radio_params { + unsigned int channels; + const char *reg_alpha2; + const struct ieee80211_regdomain *regd; + bool reg_strict; + bool p2p_device; + bool use_chanctx; + bool destroy_on_close; + const char *hwname; + bool no_vif; +}; + +static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, + struct genl_info *info) +{ + if (info) + genl_notify(&hwsim_genl_family, mcast_skb, info, + HWSIM_MCGRP_CONFIG, GFP_KERNEL); + else + genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, + HWSIM_MCGRP_CONFIG, GFP_KERNEL); +} + +static int append_radio_msg(struct sk_buff *skb, int id, + struct hwsim_new_radio_params *param) +{ + int ret; + + ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); + if (ret < 0) + return ret; + + if (param->channels) { + ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels); + if (ret < 0) + return ret; + } + + if (param->reg_alpha2) { + ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2, + param->reg_alpha2); + if (ret < 0) + return ret; + } + + if 
(param->regd) { + int i; + + for (i = 0; i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) { + if (hwsim_world_regdom_custom[i] != param->regd) + continue; + + ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i); + if (ret < 0) + return ret; + break; + } + } + + if (param->reg_strict) { + ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG); + if (ret < 0) + return ret; + } + + if (param->p2p_device) { + ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE); + if (ret < 0) + return ret; + } + + if (param->use_chanctx) { + ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX); + if (ret < 0) + return ret; + } + + if (param->hwname) { + ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, + strlen(param->hwname), param->hwname); + if (ret < 0) + return ret; + } + + return 0; +} + +static void hwsim_mcast_new_radio(int id, struct genl_info *info, + struct hwsim_new_radio_params *param) +{ + struct sk_buff *mcast_skb; + void *data; + + mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!mcast_skb) + return; + + data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, + HWSIM_CMD_NEW_RADIO); + if (!data) + goto out_err; + + if (append_radio_msg(mcast_skb, id, param) < 0) + goto out_err; + + genlmsg_end(mcast_skb, data); + + hwsim_mcast_config_msg(mcast_skb, info); + return; + +out_err: + genlmsg_cancel(mcast_skb, data); + nlmsg_free(mcast_skb); +} + +static int mac80211_hwsim_new_radio(struct genl_info *info, + struct hwsim_new_radio_params *param) +{ + int err; + u8 addr[ETH_ALEN]; + struct mac80211_hwsim_data *data; + struct ieee80211_hw *hw; + enum nl80211_band band; + const struct ieee80211_ops *ops = &mac80211_hwsim_ops; + struct net *net; + int idx; + + if (WARN_ON(param->channels > 1 && !param->use_chanctx)) + return -EINVAL; + + spin_lock_bh(&hwsim_radio_lock); + idx = hwsim_radio_idx++; + spin_unlock_bh(&hwsim_radio_lock); + + if (param->use_chanctx) + ops = &mac80211_hwsim_mchan_ops; + hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname); + if (!hw) { + printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n"); + err = -ENOMEM; + goto failed; + } + + /* ieee80211_alloc_hw_nm may have used a default name */ + param->hwname = wiphy_name(hw->wiphy); + + if (info) + net = genl_info_net(info); + else + net = &init_net; + wiphy_net_set(hw->wiphy, net); + + data = hw->priv; + data->hw = hw; + + data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx); + if (IS_ERR(data->dev)) { + printk(KERN_DEBUG + "mac80211_hwsim: device_create failed (%ld)\n", + PTR_ERR(data->dev)); + err = -ENOMEM; + goto failed_drvdata; + } + data->dev->driver = &mac80211_hwsim_driver.driver; + err = device_bind_driver(data->dev); + if (err != 0) { + printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", + err); + goto failed_bind; + } + + skb_queue_head_init(&data->pending); + + SET_IEEE80211_DEV(hw, data->dev); + eth_zero_addr(addr); + addr[0] = 0x02; + addr[3] = idx >> 8; + addr[4] = idx; + memcpy(data->addresses[0].addr, addr, ETH_ALEN); + memcpy(data->addresses[1].addr, addr, ETH_ALEN); + data->addresses[1].addr[0] |= 0x40; + hw->wiphy->n_addresses = 2; + hw->wiphy->addresses = data->addresses; + + data->channels = param->channels; + data->use_chanctx = param->use_chanctx; + data->idx = idx; + data->destroy_on_close = param->destroy_on_close; + if (info) + data->portid = info->snd_portid; + + if (data->use_chanctx) { + hw->wiphy->max_scan_ssids = 255; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + hw->wiphy->max_remain_on_channel_duration = 1000; + 
hw->wiphy->iface_combinations = &data->if_combination; + if (param->p2p_device) + data->if_combination = hwsim_if_comb_p2p_dev[0]; + else + data->if_combination = hwsim_if_comb[0]; + hw->wiphy->n_iface_combinations = 1; + /* For channels > 1 DFS is not allowed */ + data->if_combination.radar_detect_widths = 0; + data->if_combination.num_different_channels = data->channels; + } else if (param->p2p_device) { + hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(hwsim_if_comb_p2p_dev); + } else { + hw->wiphy->iface_combinations = hwsim_if_comb; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb); + } + + INIT_DELAYED_WORK(&data->roc_start, hw_roc_start); + INIT_DELAYED_WORK(&data->roc_done, hw_roc_done); + INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work); + + hw->queues = 5; + hw->offchannel_tx_hw_queue = 4; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT); + + if (param->p2p_device) + hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE); + + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); + ieee80211_hw_set(hw, CHANCTX_STA_CSA); + ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); + ieee80211_hw_set(hw, QUEUE_CONTROL); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, MFP_CAPABLE); + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, TDLS_WIDER_BW); + if (rctbl) + ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_HAS_CHANNEL_SWITCH; + hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_STATIC_SMPS | + NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + /* ask mac80211 to reserve space for magic */ + hw->vif_data_size = sizeof(struct hwsim_vif_priv); + hw->sta_data_size = sizeof(struct hwsim_sta_priv); + hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv); + + memcpy(data->channels_2ghz, hwsim_channels_2ghz, + sizeof(hwsim_channels_2ghz)); + memcpy(data->channels_5ghz, hwsim_channels_5ghz, + sizeof(hwsim_channels_5ghz)); + memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); + + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband = &data->bands[band]; + switch (band) { + case NL80211_BAND_2GHZ: + sband->channels = data->channels_2ghz; + sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz); + sband->bitrates = data->rates; + sband->n_bitrates = ARRAY_SIZE(hwsim_rates); + break; + case NL80211_BAND_5GHZ: + sband->channels = data->channels_5ghz; + sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz); + sband->bitrates = data->rates + 4; + sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; + + sband->vht_cap.vht_supported = true; + sband->vht_cap.cap = + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ | + IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_TXSTBC | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_RXSTBC_2 | + IEEE80211_VHT_CAP_RXSTBC_3 | + IEEE80211_VHT_CAP_RXSTBC_4 | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + sband->vht_cap.vht_mcs.rx_mcs_map = + 
cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 14); + sband->vht_cap.vht_mcs.tx_mcs_map = + sband->vht_cap.vht_mcs.rx_mcs_map; + break; + default: + continue; + } + + sband->ht_cap.ht_supported = true; + sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40; + sband->ht_cap.ampdu_factor = 0x3; + sband->ht_cap.ampdu_density = 0x6; + memset(&sband->ht_cap.mcs, 0, + sizeof(sband->ht_cap.mcs)); + sband->ht_cap.mcs.rx_mask[0] = 0xff; + sband->ht_cap.mcs.rx_mask[1] = 0xff; + sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + + hw->wiphy->bands[band] = sband; + } + + /* By default all radios belong to the first group */ + data->group = 1; + mutex_init(&data->mutex); + + data->netgroup = hwsim_net_get_netgroup(net); + + /* Enable frame retransmissions for lossy channels */ + hw->max_rates = 4; + hw->max_rate_tries = 11; + + hw->wiphy->vendor_commands = mac80211_hwsim_vendor_commands; + hw->wiphy->n_vendor_commands = + ARRAY_SIZE(mac80211_hwsim_vendor_commands); + hw->wiphy->vendor_events = mac80211_hwsim_vendor_events; + hw->wiphy->n_vendor_events = ARRAY_SIZE(mac80211_hwsim_vendor_events); + + if (param->reg_strict) + hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG; + if (param->regd) { + data->regd = param->regd; + hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; + wiphy_apply_custom_regulatory(hw->wiphy, param->regd); + /* give the regulatory workqueue a chance to run */ + schedule_timeout_interruptible(1); + } + + if (param->no_vif) + ieee80211_hw_set(hw, NO_AUTO_VIF); + + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + + err = ieee80211_register_hw(hw); + if (err < 0) { + printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n", + err); + goto failed_hw; + } + + wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr); + + if (param->reg_alpha2) { + data->alpha2[0] = param->reg_alpha2[0]; + data->alpha2[1] = param->reg_alpha2[1]; + regulatory_hint(hw->wiphy, param->reg_alpha2); + } + + data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir); + debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps); + debugfs_create_file("group", 0666, data->debugfs, data, + &hwsim_fops_group); + if (!data->use_chanctx) + debugfs_create_file("dfs_simulate_radar", 0222, + data->debugfs, + data, &hwsim_simulate_radar); + + tasklet_hrtimer_init(&data->beacon_timer, + mac80211_hwsim_beacon, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + + spin_lock_bh(&hwsim_radio_lock); + list_add_tail(&data->list, &hwsim_radios); + spin_unlock_bh(&hwsim_radio_lock); + + if (idx > 0) + hwsim_mcast_new_radio(idx, info, param); + + return idx; + +failed_hw: + device_release_driver(data->dev); +failed_bind: + device_unregister(data->dev); +failed_drvdata: + ieee80211_free_hw(hw); +failed: + return err; +} + +static void hwsim_mcast_del_radio(int id, const char *hwname, + struct genl_info *info) +{ + struct sk_buff *skb; + void *data; + int ret; + + skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) + return; + + data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, + HWSIM_CMD_DEL_RADIO); + if (!data) + goto error; + + ret = nla_put_u32(skb, 
HWSIM_ATTR_RADIO_ID, id); + if (ret < 0) + goto error; + + ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname), + hwname); + if (ret < 0) + goto error; + + genlmsg_end(skb, data); + + hwsim_mcast_config_msg(skb, info); + + return; + +error: + nlmsg_free(skb); +} + +static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data, + const char *hwname, + struct genl_info *info) +{ + hwsim_mcast_del_radio(data->idx, hwname, info); + debugfs_remove_recursive(data->debugfs); + ieee80211_unregister_hw(data->hw); + device_release_driver(data->dev); + device_unregister(data->dev); + ieee80211_free_hw(data->hw); +} + +static int mac80211_hwsim_get_radio(struct sk_buff *skb, + struct mac80211_hwsim_data *data, + u32 portid, u32 seq, + struct netlink_callback *cb, int flags) +{ + void *hdr; + struct hwsim_new_radio_params param = { }; + int res = -EMSGSIZE; + + hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags, + HWSIM_CMD_GET_RADIO); + if (!hdr) + return -EMSGSIZE; + + if (cb) + genl_dump_check_consistent(cb, hdr, &hwsim_genl_family); + + if (data->alpha2[0] && data->alpha2[1]) + param.reg_alpha2 = data->alpha2; + + param.reg_strict = !!(data->hw->wiphy->regulatory_flags & + REGULATORY_STRICT_REG); + param.p2p_device = !!(data->hw->wiphy->interface_modes & + BIT(NL80211_IFTYPE_P2P_DEVICE)); + param.use_chanctx = data->use_chanctx; + param.regd = data->regd; + param.channels = data->channels; + param.hwname = wiphy_name(data->hw->wiphy); + + res = append_radio_msg(skb, data->idx, ¶m); + if (res < 0) + goto out_err; + + genlmsg_end(skb, hdr); + return 0; + +out_err: + genlmsg_cancel(skb, hdr); + return res; +} + +static void mac80211_hwsim_free(void) +{ + struct mac80211_hwsim_data *data; + + spin_lock_bh(&hwsim_radio_lock); + while ((data = list_first_entry_or_null(&hwsim_radios, + struct mac80211_hwsim_data, + list))) { + list_del(&data->list); + spin_unlock_bh(&hwsim_radio_lock); + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), + NULL); + spin_lock_bh(&hwsim_radio_lock); + } + spin_unlock_bh(&hwsim_radio_lock); + class_destroy(hwsim_class); +} + +static const struct net_device_ops hwsim_netdev_ops = { + .ndo_start_xmit = hwsim_mon_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static void hwsim_mon_setup(struct net_device *dev) +{ + dev->netdev_ops = &hwsim_netdev_ops; + dev->needs_free_netdev = true; + ether_setup(dev); + dev->priv_flags |= IFF_NO_QUEUE; + dev->type = ARPHRD_IEEE80211_RADIOTAP; + eth_zero_addr(dev->dev_addr); + dev->dev_addr[0] = 0x12; +} + +static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) +{ + struct mac80211_hwsim_data *data; + bool _found = false; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) { + if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) { + _found = true; + break; + } + } + spin_unlock_bh(&hwsim_radio_lock); + + if (!_found) + return NULL; + + return data; +} + +static void hwsim_register_wmediumd(struct net *net, u32 portid) +{ + struct mac80211_hwsim_data *data; + + hwsim_net_set_wmediumd(net, portid); + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) { + if (data->netgroup == hwsim_net_get_netgroup(net)) + data->wmediumd = portid; + } + spin_unlock_bh(&hwsim_radio_lock); +} + +static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, + struct genl_info *info) +{ + + struct ieee80211_hdr *hdr; + struct mac80211_hwsim_data *data2; + struct ieee80211_tx_info *txi; + 
struct hwsim_tx_rate *tx_attempts; + u64 ret_skb_cookie; + struct sk_buff *skb, *tmp; + const u8 *src; + unsigned int hwsim_flags; + int i; + bool found = false; + + if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || + !info->attrs[HWSIM_ATTR_FLAGS] || + !info->attrs[HWSIM_ATTR_COOKIE] || + !info->attrs[HWSIM_ATTR_SIGNAL] || + !info->attrs[HWSIM_ATTR_TX_INFO]) + goto out; + + src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); + hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); + ret_skb_cookie = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]); + + data2 = get_hwsim_data_ref_from_addr(src); + if (!data2) + goto out; + + if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) + goto out; + + if (info->snd_portid != data2->wmediumd) + goto out; + + /* look for the skb matching the cookie passed back from user */ + skb_queue_walk_safe(&data2->pending, skb, tmp) { + u64 skb_cookie; + + txi = IEEE80211_SKB_CB(skb); + skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0]; + + if (skb_cookie == ret_skb_cookie) { + skb_unlink(skb, &data2->pending); + found = true; + break; + } + } + + /* not found */ + if (!found) + goto out; + + /* Tx info received because the frame was broadcasted on user space, + so we get all the necessary info: tx attempts and skb control buff */ + + tx_attempts = (struct hwsim_tx_rate *)nla_data( + info->attrs[HWSIM_ATTR_TX_INFO]); + + /* now send back TX status */ + txi = IEEE80211_SKB_CB(skb); + + ieee80211_tx_info_clear_status(txi); + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + txi->status.rates[i].idx = tx_attempts[i].idx; + txi->status.rates[i].count = tx_attempts[i].count; + /*txi->status.rates[i].flags = 0;*/ + } + + txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); + + if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) && + (hwsim_flags & HWSIM_TX_STAT_ACK)) { + if (skb->len >= 16) { + hdr = (struct ieee80211_hdr *) skb->data; + mac80211_hwsim_monitor_ack(data2->channel, + hdr->addr2); + } + txi->flags |= IEEE80211_TX_STAT_ACK; + } + ieee80211_tx_status_irqsafe(data2->hw, skb); + return 0; +out: + return -EINVAL; + +} + +static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, + struct genl_info *info) +{ + struct mac80211_hwsim_data *data2; + struct ieee80211_rx_status rx_status; + const u8 *dst; + int frame_data_len; + void *frame_data; + struct sk_buff *skb = NULL; + + if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || + !info->attrs[HWSIM_ATTR_FRAME] || + !info->attrs[HWSIM_ATTR_RX_RATE] || + !info->attrs[HWSIM_ATTR_SIGNAL]) + goto out; + + dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]); + frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); + frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); + + /* Allocate new skb here */ + skb = alloc_skb(frame_data_len, GFP_KERNEL); + if (skb == NULL) + goto err; + + if (frame_data_len > IEEE80211_MAX_DATA_LEN) + goto err; + + /* Copy the data */ + skb_put_data(skb, frame_data, frame_data_len); + + data2 = get_hwsim_data_ref_from_addr(dst); + if (!data2) + goto out; + + if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) + goto out; + + if (info->snd_portid != data2->wmediumd) + goto out; + + /* check if radio is configured properly */ + + if (data2->idle || !data2->started) + goto out; + + /* A frame is received from user space */ + memset(&rx_status, 0, sizeof(rx_status)); + if (info->attrs[HWSIM_ATTR_FREQ]) { + /* throw away off-channel packets, but allow both the temporary + * ("hw" scan/remain-on-channel) and regular channel, 
since the + * internal datapath also allows this + */ + mutex_lock(&data2->mutex); + rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]); + + if (rx_status.freq != data2->channel->center_freq && + (!data2->tmp_chan || + rx_status.freq != data2->tmp_chan->center_freq)) { + mutex_unlock(&data2->mutex); + goto out; + } + mutex_unlock(&data2->mutex); + } else { + rx_status.freq = data2->channel->center_freq; + } + + rx_status.band = data2->channel->band; + rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); + rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); + + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + data2->rx_pkts++; + data2->rx_bytes += skb->len; + ieee80211_rx_irqsafe(data2->hw, skb); + + return 0; +err: + printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); +out: + dev_kfree_skb(skb); + return -EINVAL; +} + +static int hwsim_register_received_nl(struct sk_buff *skb_2, + struct genl_info *info) +{ + struct net *net = genl_info_net(info); + struct mac80211_hwsim_data *data; + int chans = 1; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) + chans = max(chans, data->channels); + spin_unlock_bh(&hwsim_radio_lock); + + /* In the future we should revise the userspace API and allow it + * to set a flag that it does support multi-channel, then we can + * let this pass conditionally on the flag. + * For current userspace, prohibit it since it won't work right. + */ + if (chans > 1) + return -EOPNOTSUPP; + + if (hwsim_net_get_wmediumd(net)) + return -EBUSY; + + hwsim_register_wmediumd(net, info->snd_portid); + + printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, " + "switching to wmediumd mode with pid %d\n", info->snd_portid); + + return 0; +} + +static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct hwsim_new_radio_params param = { 0 }; + const char *hwname = NULL; + + param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; + param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; + param.channels = channels; + param.destroy_on_close = + info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE]; + + if (info->attrs[HWSIM_ATTR_CHANNELS]) + param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + + if (info->attrs[HWSIM_ATTR_NO_VIF]) + param.no_vif = true; + + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + param.hwname = hwname; + } + + if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) + param.use_chanctx = true; + else + param.use_chanctx = (param.channels > 1); + + if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) + param.reg_alpha2 = + nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); + + if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { + u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); + + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) + return -EINVAL; + param.regd = hwsim_world_regdom_custom[idx]; + } + + return mac80211_hwsim_new_radio(info, ¶m); +} + +static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct mac80211_hwsim_data *data; + s64 idx = -1; + const char *hwname = NULL; + + if (info->attrs[HWSIM_ATTR_RADIO_ID]) { + idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); + } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char 
*)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + } else + return -EINVAL; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) { + if (idx >= 0) { + if (data->idx != idx) + continue; + } else { + if (!hwname || + strcmp(hwname, wiphy_name(data->hw->wiphy))) + continue; + } + + if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) + continue; + + list_del(&data->list); + spin_unlock_bh(&hwsim_radio_lock); + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), + info); + kfree(hwname); + return 0; + } + spin_unlock_bh(&hwsim_radio_lock); + + kfree(hwname); + return -ENODEV; +} + +static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) +{ + struct mac80211_hwsim_data *data; + struct sk_buff *skb; + int idx, res = -ENODEV; + + if (!info->attrs[HWSIM_ATTR_RADIO_ID]) + return -EINVAL; + idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) { + if (data->idx != idx) + continue; + + if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) + continue; + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!skb) { + res = -ENOMEM; + goto out_err; + } + + res = mac80211_hwsim_get_radio(skb, data, info->snd_portid, + info->snd_seq, NULL, 0); + if (res < 0) { + nlmsg_free(skb); + goto out_err; + } + + genlmsg_reply(skb, info); + break; + } + +out_err: + spin_unlock_bh(&hwsim_radio_lock); + + return res; +} + +static int hwsim_dump_radio_nl(struct sk_buff *skb, + struct netlink_callback *cb) +{ + int idx = cb->args[0]; + struct mac80211_hwsim_data *data = NULL; + int res; + + spin_lock_bh(&hwsim_radio_lock); + + if (idx == hwsim_radio_idx) + goto done; + + list_for_each_entry(data, &hwsim_radios, list) { + if (data->idx < idx) + continue; + + if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk))) + continue; + + res = mac80211_hwsim_get_radio(skb, data, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, cb, + NLM_F_MULTI); + if (res < 0) + break; + + idx = data->idx + 1; + } + + cb->args[0] = idx; + +done: + spin_unlock_bh(&hwsim_radio_lock); + return skb->len; +} + +/* Generic Netlink operations array */ +static const struct genl_ops hwsim_ops[] = { + { + .cmd = HWSIM_CMD_REGISTER, + .policy = hwsim_genl_policy, + .doit = hwsim_register_received_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = HWSIM_CMD_FRAME, + .policy = hwsim_genl_policy, + .doit = hwsim_cloned_frame_received_nl, + }, + { + .cmd = HWSIM_CMD_TX_INFO_FRAME, + .policy = hwsim_genl_policy, + .doit = hwsim_tx_info_frame_received_nl, + }, + { + .cmd = HWSIM_CMD_NEW_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_new_radio_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = HWSIM_CMD_DEL_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_del_radio_nl, + .flags = GENL_UNS_ADMIN_PERM, + }, + { + .cmd = HWSIM_CMD_GET_RADIO, + .policy = hwsim_genl_policy, + .doit = hwsim_get_radio_nl, + .dumpit = hwsim_dump_radio_nl, + }, +}; + +static struct genl_family hwsim_genl_family __ro_after_init = { + .name = "MAC80211_HWSIM", + .version = 1, + .maxattr = HWSIM_ATTR_MAX, + .netnsok = true, + .module = THIS_MODULE, + .ops = hwsim_ops, + .n_ops = ARRAY_SIZE(hwsim_ops), + .mcgrps = hwsim_mcgrps, + .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), +}; + +static void destroy_radio(struct work_struct *work) +{ + struct mac80211_hwsim_data *data = + container_of(work, struct mac80211_hwsim_data, destroy_work); + + mac80211_hwsim_del_radio(data, 
wiphy_name(data->hw->wiphy), NULL); +} + +static void remove_user_radios(u32 portid) +{ + struct mac80211_hwsim_data *entry, *tmp; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { + if (entry->destroy_on_close && entry->portid == portid) { + list_del(&entry->list); + INIT_WORK(&entry->destroy_work, destroy_radio); + schedule_work(&entry->destroy_work); + } + } + spin_unlock_bh(&hwsim_radio_lock); +} + +static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, + unsigned long state, + void *_notify) +{ + struct netlink_notify *notify = _notify; + + if (state != NETLINK_URELEASE) + return NOTIFY_DONE; + + remove_user_radios(notify->portid); + + if (notify->portid == hwsim_net_get_wmediumd(notify->net)) { + printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" + " socket, switching to perfect channel medium\n"); + hwsim_register_wmediumd(notify->net, 0); + } + return NOTIFY_DONE; + +} + +static struct notifier_block hwsim_netlink_notifier = { + .notifier_call = mac80211_hwsim_netlink_notify, +}; + +static int __init hwsim_init_netlink(void) +{ + int rc; + + printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); + + rc = genl_register_family(&hwsim_genl_family); + if (rc) + goto failure; + + rc = netlink_register_notifier(&hwsim_netlink_notifier); + if (rc) { + genl_unregister_family(&hwsim_genl_family); + goto failure; + } + + return 0; + +failure: + printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); + return -EINVAL; +} + +static __net_init int hwsim_init_net(struct net *net) +{ + hwsim_net_set_netgroup(net); + + return 0; +} + +static void __net_exit hwsim_exit_net(struct net *net) +{ + struct mac80211_hwsim_data *data, *tmp; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { + if (!net_eq(wiphy_net(data->hw->wiphy), net)) + continue; + + /* Radios created in init_net are returned to init_net. 
*/ + if (data->netgroup == hwsim_net_get_netgroup(&init_net)) + continue; + + list_del(&data->list); + INIT_WORK(&data->destroy_work, destroy_radio); + schedule_work(&data->destroy_work); + } + spin_unlock_bh(&hwsim_radio_lock); +} + +static struct pernet_operations hwsim_net_ops = { + .init = hwsim_init_net, + .exit = hwsim_exit_net, + .id = &hwsim_net_id, + .size = sizeof(struct hwsim_net), +}; + +static void hwsim_exit_netlink(void) +{ + /* unregister the notifier */ + netlink_unregister_notifier(&hwsim_netlink_notifier); + /* unregister the family */ + genl_unregister_family(&hwsim_genl_family); +} + +static int __init init_mac80211_hwsim(void) +{ + int i, err; + + if (radios < 0 || radios > 100) + return -EINVAL; + + if (channels < 1) + return -EINVAL; + + spin_lock_init(&hwsim_radio_lock); + + err = register_pernet_device(&hwsim_net_ops); + if (err) + return err; + + err = platform_driver_register(&mac80211_hwsim_driver); + if (err) + goto out_unregister_pernet; + + hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); + if (IS_ERR(hwsim_class)) { + err = PTR_ERR(hwsim_class); + goto out_unregister_driver; + } + + err = hwsim_init_netlink(); + if (err < 0) + goto out_unregister_driver; + + for (i = 0; i < radios; i++) { + struct hwsim_new_radio_params param = { 0 }; + + param.channels = channels; + + switch (regtest) { + case HWSIM_REGTEST_DIFF_COUNTRY: + if (i < ARRAY_SIZE(hwsim_alpha2s)) + param.reg_alpha2 = hwsim_alpha2s[i]; + break; + case HWSIM_REGTEST_DRIVER_REG_FOLLOW: + if (!i) + param.reg_alpha2 = hwsim_alpha2s[0]; + break; + case HWSIM_REGTEST_STRICT_ALL: + param.reg_strict = true; + case HWSIM_REGTEST_DRIVER_REG_ALL: + param.reg_alpha2 = hwsim_alpha2s[0]; + break; + case HWSIM_REGTEST_WORLD_ROAM: + if (i == 0) + param.regd = &hwsim_world_regdom_custom_01; + break; + case HWSIM_REGTEST_CUSTOM_WORLD: + param.regd = &hwsim_world_regdom_custom_01; + break; + case HWSIM_REGTEST_CUSTOM_WORLD_2: + if (i == 0) + param.regd = &hwsim_world_regdom_custom_01; + else if (i == 1) + param.regd = &hwsim_world_regdom_custom_02; + break; + case HWSIM_REGTEST_STRICT_FOLLOW: + if (i == 0) { + param.reg_strict = true; + param.reg_alpha2 = hwsim_alpha2s[0]; + } + break; + case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: + if (i == 0) { + param.reg_strict = true; + param.reg_alpha2 = hwsim_alpha2s[0]; + } else if (i == 1) { + param.reg_alpha2 = hwsim_alpha2s[1]; + } + break; + case HWSIM_REGTEST_ALL: + switch (i) { + case 0: + param.regd = &hwsim_world_regdom_custom_01; + break; + case 1: + param.regd = &hwsim_world_regdom_custom_02; + break; + case 2: + param.reg_alpha2 = hwsim_alpha2s[0]; + break; + case 3: + param.reg_alpha2 = hwsim_alpha2s[1]; + break; + case 4: + param.reg_strict = true; + param.reg_alpha2 = hwsim_alpha2s[2]; + break; + } + break; + default: + break; + } + + param.p2p_device = support_p2p_device; + param.use_chanctx = channels > 1; + + err = mac80211_hwsim_new_radio(NULL, ¶m); + if (err < 0) + goto out_free_radios; + } + + hwsim_mon = alloc_netdev(0, "hwsim%d", NET_NAME_UNKNOWN, + hwsim_mon_setup); + if (hwsim_mon == NULL) { + err = -ENOMEM; + goto out_free_radios; + } + + rtnl_lock(); + err = dev_alloc_name(hwsim_mon, hwsim_mon->name); + if (err < 0) { + rtnl_unlock(); + goto out_free_radios; + } + + err = register_netdevice(hwsim_mon); + if (err < 0) { + rtnl_unlock(); + goto out_free_mon; + } + rtnl_unlock(); + + return 0; + +out_free_mon: + free_netdev(hwsim_mon); +out_free_radios: + mac80211_hwsim_free(); +out_unregister_driver: + 
platform_driver_unregister(&mac80211_hwsim_driver); +out_unregister_pernet: + unregister_pernet_device(&hwsim_net_ops); + return err; +} +module_init(init_mac80211_hwsim); + +static void __exit exit_mac80211_hwsim(void) +{ + printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); + + hwsim_exit_netlink(); + + mac80211_hwsim_free(); + unregister_netdev(hwsim_mon); + platform_driver_unregister(&mac80211_hwsim_driver); + unregister_pernet_device(&hwsim_net_ops); +} +module_exit(exit_mac80211_hwsim); diff --git a/mac80211_hwsim/mac80211_hwsim.h b/mac80211_hwsim/mac80211_hwsim.h new file mode 100644 index 0000000..3f5eda5 --- /dev/null +++ b/mac80211_hwsim/mac80211_hwsim.h @@ -0,0 +1,174 @@ +/* + * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 + * Copyright (c) 2008, Jouni Malinen + * Copyright (c) 2011, Javier Lopez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAC80211_HWSIM_H +#define __MAC80211_HWSIM_H + +/** + * enum hwsim_tx_control_flags - flags to describe transmission info/status + * + * These flags are used to give the wmediumd extra information in order to + * modify its behavior for each frame + * + * @HWSIM_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame. + * @HWSIM_TX_CTL_NO_ACK: tell the wmediumd not to wait for an ack + * @HWSIM_TX_STAT_ACK: Frame was acknowledged + * + */ +enum hwsim_tx_control_flags { + HWSIM_TX_CTL_REQ_TX_STATUS = BIT(0), + HWSIM_TX_CTL_NO_ACK = BIT(1), + HWSIM_TX_STAT_ACK = BIT(2), +}; + +/** + * DOC: Frame transmission/registration support + * + * Frame transmission and registration support exists to allow userspace + * entities such as wmediumd to receive and process all broadcasted + * frames from a mac80211_hwsim radio device. + * + * This allow user space applications to decide if the frame should be + * dropped or not and implement a wireless medium simulator at user space. + * + * Registration is done by sending a register message to the driver and + * will be automatically unregistered if the user application doesn't + * responds to sent frames. + * Once registered the user application has to take responsibility of + * broadcasting the frames to all listening mac80211_hwsim radio + * interfaces. + * + * For more technical details, see the corresponding command descriptions + * below. + */ + +/** + * enum hwsim_commands - supported hwsim commands + * + * @HWSIM_CMD_UNSPEC: unspecified command to catch errors + * + * @HWSIM_CMD_REGISTER: request to register and received all broadcasted + * frames by any mac80211_hwsim radio device. 
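
The registration flow described here can be driven from user space with a few generic netlink calls. Below is a minimal sketch using libnl-genl-3; it assumes the family is registered under the name "MAC80211_HWSIM" (as in the upstream driver), hard-codes HWSIM_CMD_REGISTER from the enum below, and omits all error handling.

/* Sketch: register as a wmediumd-style listener for hwsim frames. */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#define HWSIM_CMD_REGISTER 1	/* second entry of enum hwsim_commands below */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");

	/* HWSIM_CMD_REGISTER carries no attributes */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_REGISTER, 0);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	/* As long as sk stays open, HWSIM_CMD_FRAME notifications arrive on it
	 * and the application is responsible for forwarding them. */
	for (;;)
		nl_recvmsgs_default(sk);
}
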
+ * @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user + * space, uses: + * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER, + * %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE, + * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional) + * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to + * kernel, uses: + * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS, + * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE + * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, + * returns the radio ID (>= 0) or negative on errors, if successful + * then multicast the result + * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted + * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses: + * %HWSIM_ATTR_RADIO_ID + * @__HWSIM_CMD_MAX: enum limit + */ +enum { + HWSIM_CMD_UNSPEC, + HWSIM_CMD_REGISTER, + HWSIM_CMD_FRAME, + HWSIM_CMD_TX_INFO_FRAME, + HWSIM_CMD_NEW_RADIO, + HWSIM_CMD_DEL_RADIO, + HWSIM_CMD_GET_RADIO, + __HWSIM_CMD_MAX, +}; +#define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1) + +#define HWSIM_CMD_CREATE_RADIO HWSIM_CMD_NEW_RADIO +#define HWSIM_CMD_DESTROY_RADIO HWSIM_CMD_DEL_RADIO + +/** + * enum hwsim_attrs - hwsim netlink attributes + * + * @HWSIM_ATTR_UNSPEC: unspecified attribute to catch errors + * + * @HWSIM_ATTR_ADDR_RECEIVER: MAC address of the radio device that + * the frame is broadcasted to + * @HWSIM_ATTR_ADDR_TRANSMITTER: MAC address of the radio device that + * the frame was broadcasted from + * @HWSIM_ATTR_FRAME: Data array + * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process + properly the frame at user space + * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame at user + space + * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame at user + space + * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array + * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame + * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO + * command giving the number of channels supported by the new radio + * @HWSIM_ATTR_RADIO_ID: u32 attribute used with %HWSIM_CMD_DESTROY_RADIO + * only to destroy a radio + * @HWSIM_ATTR_REG_HINT_ALPHA2: alpha2 for regulatoro driver hint + * (nla string, length 2) + * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute) + * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute) + * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag) + * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO + * command to force use of channel contexts even when only a + * single channel is supported + * @HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE: used with the %HWSIM_CMD_CREATE_RADIO + * command to force radio removal when process that created the radio dies + * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 + * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. + * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. 
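
Radio creation works the same way and is where several of the attributes above come into play. The sketch below (same libnl-genl-3 assumptions as the registration example, constant values taken from the enums in this header, no error handling) asks for a two-channel radio that the driver tears down automatically via remove_user_radios() when the creating socket is released.

/* Sketch: create a radio that disappears when this netlink socket closes. */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>

#define HWSIM_CMD_NEW_RADIO			4	/* enum hwsim_commands above */
#define HWSIM_ATTR_CHANNELS			9	/* enum hwsim_attrs below */
#define HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE	16	/* enum hwsim_attrs below */

static void new_radio(struct nl_sock *sk, int family)
{
	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_NEW_RADIO, 0);
	nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);
	nla_put_flag(msg, HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);
}
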
+ * @__HWSIM_ATTR_MAX: enum limit + */ + + +enum { + HWSIM_ATTR_UNSPEC, + HWSIM_ATTR_ADDR_RECEIVER, + HWSIM_ATTR_ADDR_TRANSMITTER, + HWSIM_ATTR_FRAME, + HWSIM_ATTR_FLAGS, + HWSIM_ATTR_RX_RATE, + HWSIM_ATTR_SIGNAL, + HWSIM_ATTR_TX_INFO, + HWSIM_ATTR_COOKIE, + HWSIM_ATTR_CHANNELS, + HWSIM_ATTR_RADIO_ID, + HWSIM_ATTR_REG_HINT_ALPHA2, + HWSIM_ATTR_REG_CUSTOM_REG, + HWSIM_ATTR_REG_STRICT_REG, + HWSIM_ATTR_SUPPORT_P2P_DEVICE, + HWSIM_ATTR_USE_CHANCTX, + HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, + HWSIM_ATTR_RADIO_NAME, + HWSIM_ATTR_NO_VIF, + HWSIM_ATTR_FREQ, + HWSIM_ATTR_PAD, + __HWSIM_ATTR_MAX, +}; +#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) + +/** + * struct hwsim_tx_rate - rate selection/status + * + * @idx: rate index to attempt to send with + * @count: number of tries in this rate before going to the next rate + * + * A value of -1 for @idx indicates an invalid rate and, if used + * in an array of retry rates, that no more rates should be tried. + * + * When used for transmit status reporting, the driver should + * always report the rate and number of retries used. + * + */ +struct hwsim_tx_rate { + s8 idx; + u8 count; +} __packed; + +#endif /* __MAC80211_HWSIM_H */ From 7a503d3548ad298133db29d06dd887b15f7584cc Mon Sep 17 00:00:00 2001 From: Fengqian Gao Date: Tue, 10 Sep 2019 04:19:27 -0400 Subject: [PATCH 2/8] HotFix for binder driver build error if binder enabled in kernel config This is a tmp fix. Need to backport binderfs code from kernel 5.0.X Tracked-On: OAM-86838 Signed-off-by: Wang, Liang --- binder/binder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/binder/binder.c b/binder/binder.c index a79204f..caa9e9e 100644 --- a/binder/binder.c +++ b/binder/binder.c @@ -152,7 +152,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | //yangbin tmp comments because it will cause crash when we insmod //module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); -static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; +static char *binder_devices_param = (strcmp(CONFIG_ANDROID_BINDER_DEVICES, "")) ? CONFIG_ANDROID_BINDER_DEVICES : "hostbinder,hostvndbinder,hosthwbinder"; module_param_named(devices, binder_devices_param, charp, 0444); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); From a52ee2a65de3c3ca345a1d7dcb861447bbe83a79 Mon Sep 17 00:00:00 2001 From: Fengqian Gao Date: Thu, 14 Nov 2019 06:28:01 -0500 Subject: [PATCH 3/8] [WA]Binderfs compile error on kernel higher than 5.3.0 Currently used binderfs code are back-ported from kernel 4.21 sget_userns function is removed in kernel 5.3.0, which failed the compiling of binderfs. Binderfs source code will be updated in later patches. Tracked-On: OAM-88513 Signed-off-by: Hongcheng Xie Signed-off-by: Fengqian Gao --- binder/binderfs.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/binder/binderfs.c b/binder/binderfs.c index f54f842..5199a35 100644 --- a/binder/binderfs.c +++ b/binder/binderfs.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -477,6 +478,19 @@ static struct dentry *binderfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { + +/* + sget_userns function is removed in kernel version higher than 5.3 + This will fail the compiling of binderfs, since currently used binderfs is + backport from kernel 4.21 + In the latest binderfs code, these codes are changed too. 
+ So, this is a hotfix which enabing CIC on kernel high than 5.3.0 + (TODO) Update binderfs code +*/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + return mount_nodev(fs_type, flags, data, binderfs_fill_super); + +#else struct super_block *sb; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; @@ -499,6 +513,8 @@ static struct dentry *binderfs_mount(struct file_system_type *fs_type, } return dget(sb->s_root); + +#endif } static void binderfs_kill_super(struct super_block *sb) From 3528965d3fe41a90963f1d4732180c005accddeb Mon Sep 17 00:00:00 2001 From: Joe Zheng Date: Tue, 26 Nov 2019 15:52:41 +0800 Subject: [PATCH 4/8] Revert "[WA]Binderfs compile error on kernel higher than 5.3.0" This reverts commit a52ee2a65de3c3ca345a1d7dcb861447bbe83a79. Tracked-On: OAM-88674 --- binder/binderfs.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/binder/binderfs.c b/binder/binderfs.c index 5199a35..f54f842 100644 --- a/binder/binderfs.c +++ b/binder/binderfs.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -478,19 +477,6 @@ static struct dentry *binderfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - -/* - sget_userns function is removed in kernel version higher than 5.3 - This will fail the compiling of binderfs, since currently used binderfs is - backport from kernel 4.21 - In the latest binderfs code, these codes are changed too. - So, this is a hotfix which enabing CIC on kernel high than 5.3.0 - (TODO) Update binderfs code -*/ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) - return mount_nodev(fs_type, flags, data, binderfs_fill_super); - -#else struct super_block *sb; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; @@ -513,8 +499,6 @@ static struct dentry *binderfs_mount(struct file_system_type *fs_type, } return dget(sb->s_root); - -#endif } static void binderfs_kill_super(struct super_block *sb) From ad500b040b6803aa9aade95ab3fa6c1287e78422 Mon Sep 17 00:00:00 2001 From: Fengqian Gao Date: Sat, 12 Oct 2019 14:59:47 +0800 Subject: [PATCH 5/8] Update binderfs code to align with ubuntu kernel 5.0.0 To keep compatible with kernel lower to 4.14, some changes are needed. For ubuntu kernel, security hook secid_to_secctx is missing which will cause binder transaction failed. Disable it for now. 
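
Once the consolidated binder_linux.ko built by this patch is loaded, a quick user-space sanity check is to open a binder device and query the protocol version. The sketch below uses only the UAPI definitions from <linux/android/binder.h>; the device path is an assumption, since the actual node depends on the devices= module parameter or on where binderfs is mounted.

/* Sketch: smoke-test a binder device node (path is an assumption). */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

int main(void)
{
	struct binder_version vers = { 0 };
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BINDER_VERSION, &vers) == 0)
		printf("binder protocol version %d\n", vers.protocol_version);

	/* real clients also map the transaction buffer before talking */
	mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
	close(fd);
	return 0;
}
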
Change-Id: I0e702b0c3d3d46d91416667cdd1ddba3db7028f0 Tracked-On: OAM-88674 --- binder/Makefile | 14 +- binder/binder.c | 1128 ++++++++++++++++++--------- binder/binder.h | 504 ++++++++++++ binder/binder_alloc.c | 178 +++-- binder/binder_alloc.h | 30 +- binder/binder_internal.h | 24 +- binder/binder_trace.h | 43 +- binder/binderfs.c | 314 ++++---- binder/{binder_ctl.h => binderfs.h} | 14 +- binder/deps.c | 129 ++- binder/deps.h | 43 + 11 files changed, 1782 insertions(+), 639 deletions(-) create mode 100644 binder/binder.h rename binder/{binder_ctl.h => binderfs.h} (77%) create mode 100644 binder/deps.h diff --git a/binder/Makefile b/binder/Makefile index b4ac7d8..0126060 100644 --- a/binder/Makefile +++ b/binder/Makefile @@ -1,7 +1,7 @@ -ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"hostbinder,hostvndbinder,hosthwbinder\"" -DCONFIG_ANDROID_BINDERFS -obj-m := binderfs_module.o binder_module.o -binder_module-y := deps.o binder.o binder_alloc.o -binderfs_module-y := deps.o binderfs.o +ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"\"" -DCONFIG_ANDROID_BINDERFS + +obj-m := binder_linux.o +binder_linux-y := binder.o binder_alloc.o deps.o binderfs.o KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build @@ -9,9 +9,7 @@ all: $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD install: - cp binder_module.ko $(DESTDIR)/ - cp binderfs_module.ko $(DESTDIR)/ - insmod binder_module.ko - insmod binderfs_module.ko + cp binder_linux.ko $(DESTDIR)/ + insmod binder_linux.ko clean: rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions *.ur-safe diff --git a/binder/binder.c b/binder/binder.c index caa9e9e..89d715a 100644 --- a/binder/binder.c +++ b/binder/binder.c @@ -51,7 +51,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include #include @@ -71,22 +70,38 @@ #include #include #include +#include +#include #include +#include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) -#include -#include -#include -#endif - -#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT -#define BINDER_IPC_32BIT 1 -#endif +#include -#include +#include "binder.h" #include "binder_alloc.h" #include "binder_internal.h" #include "binder_trace.h" +#include "deps.h" + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 16, 0) +typedef unsigned __bitwise __poll_t; +typedef int vm_fault_t; + +#define DEFINE_SHOW_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +#endif static HLIST_HEAD(binder_deferred_list); static DEFINE_MUTEX(binder_deferred_lock); @@ -102,22 +117,8 @@ static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; static atomic_t binder_last_id; -#define BINDER_DEBUG_ENTRY(name) \ -static int binder_##name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, binder_##name##_show, inode->i_private); \ -} \ -\ -static const struct file_operations binder_##name##_fops = { \ - .owner = THIS_MODULE, \ - .open = binder_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -static int binder_proc_show(struct seq_file *m, void *unused); -BINDER_DEBUG_ENTRY(proc); +static int 
proc_show(struct seq_file *m, void *unused); +DEFINE_SHOW_ATTRIBUTE(proc); /* This is only defined in include/asm-arm/sizes.h */ #ifndef SZ_1K @@ -149,10 +150,9 @@ enum { }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; -//yangbin tmp comments because it will cause crash when we insmod -//module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); +module_param_named(debug_mask, binder_debug_mask, uint, 0644); -static char *binder_devices_param = (strcmp(CONFIG_ANDROID_BINDER_DEVICES, "")) ? CONFIG_ANDROID_BINDER_DEVICES : "hostbinder,hostvndbinder,hosthwbinder"; +static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; module_param_named(devices, binder_devices_param, charp, 0444); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); @@ -174,18 +174,18 @@ static int binder_set_stop_on_user_error(const char *val, return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, - param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); + param_get_int, &binder_stop_on_user_error, 0644); #define binder_debug(mask, x...) \ do { \ if (binder_debug_mask & mask) \ - pr_info(x); \ + pr_info_ratelimited(x); \ } while (0) #define binder_user_error(x...) \ do { \ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ - pr_info(x); \ + pr_info_ratelimited(x); \ if (binder_stop_on_user_error) \ binder_stop_on_user_error = 2; \ } while (0) @@ -263,7 +263,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( unsigned int cur = atomic_inc_return(&log->cur); if (cur >= ARRAY_SIZE(log->entry)) - log->full = 1; + log->full = true; e = &log->entry[cur % ARRAY_SIZE(log->entry)]; WRITE_ONCE(e->debug_id_done, 0); /* @@ -355,6 +355,8 @@ struct binder_error { * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) + * @txn_security_ctx: require sender's security context + * (invariant after initialized) * @async_todo: list of async work items * (protected by @proc->inner_lock) * @@ -391,6 +393,7 @@ struct binder_node { * invariant after initialization */ u8 accept_fds:1; + u8 txn_security_ctx:1; u8 min_priority; }; bool has_async_transaction; @@ -458,9 +461,8 @@ struct binder_ref { }; enum binder_deferred_state { - BINDER_DEFERRED_PUT_FILES = 0x01, - BINDER_DEFERRED_FLUSH = 0x02, - BINDER_DEFERRED_RELEASE = 0x04, + BINDER_DEFERRED_FLUSH = 0x01, + BINDER_DEFERRED_RELEASE = 0x02, }; /** @@ -481,9 +483,6 @@ enum binder_deferred_state { * (invariant after initialized) * @tsk task_struct for group_leader of process * (invariant after initialized) - * @files files_struct for process - * (protected by @files_lock) - * @files_lock mutex to protect @files * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -493,8 +492,6 @@ enum binder_deferred_state { * (protected by @inner_lock) * @todo: list of work for this process * (protected by @inner_lock) - * @wait: wait queue head to wait for proc work - * (invariant after initialized) * @stats: per-process binder statistics * (atomics, no lock needed) * @delivered_death: list of delivered death notification @@ -530,14 +527,11 @@ struct binder_proc { struct list_head waiting_threads; int pid; struct task_struct *tsk; - struct files_struct *files; - struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; struct list_head todo; - 
wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; @@ -579,6 +573,8 @@ enum { * (protected by @proc->inner_lock) * @todo: list of work to do for this thread * (protected by @proc->inner_lock) + * @process_todo: whether work in @todo should be processed + * (protected by @proc->inner_lock) * @return_error: transaction errors reported by this thread * (only accessed by this thread) * @reply_error: transaction errors reported by target thread @@ -604,6 +600,7 @@ struct binder_thread { bool looper_need_return; /* can be written by other thread */ struct binder_transaction *transaction_stack; struct list_head todo; + bool process_todo; struct binder_error return_error; struct binder_error reply_error; wait_queue_head_t wait; @@ -612,6 +609,23 @@ struct binder_thread { bool is_dead; }; +/** + * struct binder_txn_fd_fixup - transaction fd fixup list element + * @fixup_entry: list entry + * @file: struct file to be associated with new fd + * @offset: offset in buffer data to this fixup + * + * List element for fd fixups in a transaction. Since file + * descriptors need to be allocated in the context of the + * target process, we pass each fd to be processed in this + * struct. + */ +struct binder_txn_fd_fixup { + struct list_head fixup_entry; + struct file *file; + size_t offset; +}; + struct binder_transaction { int debug_id; struct binder_work work; @@ -629,6 +643,8 @@ struct binder_transaction { long priority; long saved_priority; kuid_t sender_euid; + struct list_head fd_fixups; + binder_uintptr_t security_ctx; /** * @lock: protects @from, @to_proc, and @to_thread * @@ -648,6 +664,7 @@ struct binder_transaction { #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) static void _binder_proc_lock(struct binder_proc *proc, int line) + __acquires(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -663,6 +680,7 @@ _binder_proc_lock(struct binder_proc *proc, int line) #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) static void _binder_proc_unlock(struct binder_proc *proc, int line) + __releases(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -678,6 +696,7 @@ _binder_proc_unlock(struct binder_proc *proc, int line) #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) static void _binder_inner_proc_lock(struct binder_proc *proc, int line) + __acquires(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -693,6 +712,7 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line) #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) static void _binder_inner_proc_unlock(struct binder_proc *proc, int line) + __releases(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -708,6 +728,7 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line) #define binder_node_lock(node) _binder_node_lock(node, __LINE__) static void _binder_node_lock(struct binder_node *node, int line) + __acquires(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -723,6 +744,7 @@ _binder_node_lock(struct binder_node *node, int line) #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) static void _binder_node_unlock(struct binder_node *node, int line) + __releases(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -739,12 +761,16 
@@ _binder_node_unlock(struct binder_node *node, int line) #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) static void _binder_node_inner_lock(struct binder_node *node, int line) + __acquires(&node->lock) __acquires(&node->proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&node->lock); if (node->proc) binder_inner_proc_lock(node->proc); + else + /* annotation for sparse */ + __acquire(&node->proc->inner_lock); } /** @@ -756,6 +782,7 @@ _binder_node_inner_lock(struct binder_node *node, int line) #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) static void _binder_node_inner_unlock(struct binder_node *node, int line) + __releases(&node->lock) __releases(&node->proc->inner_lock) { struct binder_proc *proc = node->proc; @@ -763,6 +790,9 @@ _binder_node_inner_unlock(struct binder_node *node, int line) "%s: line=%d\n", __func__, line); if (proc) binder_inner_proc_unlock(proc); + else + /* annotation for sparse */ + __release(&node->proc->inner_lock); spin_unlock(&node->lock); } @@ -789,6 +819,16 @@ static bool binder_worklist_empty(struct binder_proc *proc, return ret; } +/** + * binder_enqueue_work_ilocked() - Add an item to the work list + * @work: struct binder_work to add to list + * @target_list: list to add work to + * + * Adds the work to the specified list. Asserts that work + * is not already on a list. + * + * Requires the proc->inner_lock to be held. + */ static void binder_enqueue_work_ilocked(struct binder_work *work, struct list_head *target_list) @@ -799,22 +839,58 @@ binder_enqueue_work_ilocked(struct binder_work *work, } /** - * binder_enqueue_work() - Add an item to the work list - * @proc: binder_proc associated with list + * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work + * @thread: thread to queue work to * @work: struct binder_work to add to list - * @target_list: list to add work to * - * Adds the work to the specified list. Asserts that work - * is not already on a list. + * Adds the work to the todo list of the thread. Doesn't set the process_todo + * flag, which means that (if it wasn't already set) the thread will go to + * sleep without handling this work when it calls read. + * + * Requires the proc->inner_lock to be held. */ static void -binder_enqueue_work(struct binder_proc *proc, - struct binder_work *work, - struct list_head *target_list) +binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) { - binder_inner_proc_lock(proc); - binder_enqueue_work_ilocked(work, target_list); - binder_inner_proc_unlock(proc); + WARN_ON(!list_empty(&thread->waiting_thread_node)); + binder_enqueue_work_ilocked(work, &thread->todo); +} + +/** + * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list + * @thread: thread to queue work to + * @work: struct binder_work to add to list + * + * Adds the work to the todo list of the thread, and enables processing + * of the todo queue. + * + * Requires the proc->inner_lock to be held. 
+ */ +static void +binder_enqueue_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) +{ + WARN_ON(!list_empty(&thread->waiting_thread_node)); + binder_enqueue_work_ilocked(work, &thread->todo); + thread->process_todo = true; +} + +/** + * binder_enqueue_thread_work() - Add an item to the thread work list + * @thread: thread to queue work to + * @work: struct binder_work to add to list + * + * Adds the work to the todo list of the thread, and enables processing + * of the todo queue. + */ +static void +binder_enqueue_thread_work(struct binder_thread *thread, + struct binder_work *work) +{ + binder_inner_proc_lock(thread->proc); + binder_enqueue_thread_work_ilocked(thread, work); + binder_inner_proc_unlock(thread->proc); } static void @@ -877,70 +953,10 @@ static void binder_free_thread(struct binder_thread *thread); static void binder_free_proc(struct binder_proc *proc); static void binder_inc_node_tmpref_ilocked(struct binder_node *node); -static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) -{ - unsigned long rlim_cur; - unsigned long irqs; - int ret; - - mutex_lock(&proc->files_lock); - if (proc->files == NULL) { - ret = -ESRCH; - goto err; - } - if (!lock_task_sighand(proc->tsk, &irqs)) { - ret = -EMFILE; - goto err; - } - rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); - unlock_task_sighand(proc->tsk, &irqs); - - ret = __alloc_fd(proc->files, 0, rlim_cur, flags); -err: - mutex_unlock(&proc->files_lock); - return ret; -} - -/* - * copied from fd_install - */ -static void task_fd_install( - struct binder_proc *proc, unsigned int fd, struct file *file) -{ - mutex_lock(&proc->files_lock); - if (proc->files) - __fd_install(proc->files, fd, file); - mutex_unlock(&proc->files_lock); -} - -/* - * copied from sys_close - */ -static long task_close_fd(struct binder_proc *proc, unsigned int fd) -{ - int retval; - - mutex_lock(&proc->files_lock); - if (proc->files == NULL) { - retval = -ESRCH; - goto err; - } - retval = __close_fd(proc->files, fd); - /* can't restart close syscall because file table entry was cleared */ - if (unlikely(retval == -ERESTARTSYS || - retval == -ERESTARTNOINTR || - retval == -ERESTARTNOHAND || - retval == -ERESTART_RESTARTBLOCK)) - retval = -EINTR; -err: - mutex_unlock(&proc->files_lock); - return retval; -} - static bool binder_has_work_ilocked(struct binder_thread *thread, bool do_proc_work) { - return !binder_worklist_empty_ilocked(&thread->todo) || + return thread->process_todo || thread->looper_need_return || (do_proc_work && !binder_worklist_empty_ilocked(&thread->proc->todo)); @@ -1166,6 +1182,7 @@ static struct binder_node *binder_init_node_ilocked( node->work.type = BINDER_WORK_NODE; node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -1227,8 +1244,12 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { + struct binder_thread *thread = container_of(target_list, + struct binder_thread, todo); binder_dequeue_work_ilocked(&node->work); - binder_enqueue_work_ilocked(&node->work, target_list); + BUG_ON(&thread->todo != target_list); + binder_enqueue_deferred_thread_work_ilocked(thread, + &node->work); } } else { if (!internal) @@ -1239,6 +1260,9 @@ static int 
binder_inc_node_nilocked(struct binder_node *node, int strong, node->debug_id); return -EINVAL; } + /* + * See comment above + */ binder_enqueue_work_ilocked(&node->work, target_list); } } @@ -1379,10 +1403,14 @@ static void binder_dec_node_tmpref(struct binder_node *node) binder_node_inner_lock(node); if (!node->proc) spin_lock(&binder_dead_nodes_lock); + else + __acquire(&binder_dead_nodes_lock); node->tmp_refs--; BUG_ON(node->tmp_refs < 0); if (!node->proc) spin_unlock(&binder_dead_nodes_lock); + else + __release(&binder_dead_nodes_lock); /* * Call binder_dec_node() to check if all refcounts are 0 * and cleanup is needed. Calling with strong=0 and internal=1 @@ -1885,26 +1913,62 @@ static struct binder_thread *binder_get_txn_from( */ static struct binder_thread *binder_get_txn_from_and_acq_inner( struct binder_transaction *t) + __acquires(&t->from->proc->inner_lock) { struct binder_thread *from; from = binder_get_txn_from(t); - if (!from) + if (!from) { + __acquire(&from->proc->inner_lock); return NULL; + } binder_inner_proc_lock(from->proc); if (t->from) { BUG_ON(from != t->from); return from; } binder_inner_proc_unlock(from->proc); + __acquire(&from->proc->inner_lock); binder_thread_dec_tmpref(from); return NULL; } +/** + * binder_free_txn_fixups() - free unprocessed fd fixups + * @t: binder transaction for t->from + * + * If the transaction is being torn down prior to being + * processed by the target process, free all of the + * fd fixups and fput the file structs. It is safe to + * call this function after the fixups have been + * processed -- in that case, the list will be empty. + */ +static void binder_free_txn_fixups(struct binder_transaction *t) +{ + struct binder_txn_fd_fixup *fixup, *tmp; + + list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { + fput(fixup->file); + list_del(&fixup->fixup_entry); + kfree(fixup); + } +} + static void binder_free_transaction(struct binder_transaction *t) { - if (t->buffer) - t->buffer->transaction = NULL; + struct binder_proc *target_proc = t->to_proc; + + if (target_proc) { + binder_inner_proc_lock(target_proc); + if (t->buffer) + t->buffer->transaction = NULL; + binder_inner_proc_unlock(target_proc); + } + /* + * If the transaction has no target_proc, then + * t->buffer->transaction has already been cleared. + */ + binder_free_txn_fixups(t); kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } @@ -1928,18 +1992,26 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_pop_transaction_ilocked(target_thread, t); if (target_thread->reply_error.cmd == BR_OK) { target_thread->reply_error.cmd = error_code; - binder_enqueue_work_ilocked( - &target_thread->reply_error.work, - &target_thread->todo); + binder_enqueue_thread_work_ilocked( + target_thread, + &target_thread->reply_error.work); wake_up_interruptible(&target_thread->wait); } else { - WARN(1, "Unexpected reply error: %u\n", - target_thread->reply_error.cmd); + /* + * Cannot get here for normal operation, but + * we can if multiple synchronous transactions + * are sent without blocking for responses. + * Just ignore the 2nd error in this case. 
+ */ + pr_warn("Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); binder_free_transaction(t); return; + } else { + __release(&target_thread->proc->inner_lock); } next = t->from_parent; @@ -1994,8 +2066,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) struct binder_object_header *hdr; size_t object_size = 0; - if (offset > buffer->data_size - sizeof(*hdr) || - buffer->data_size < sizeof(*hdr) || + if (buffer->data_size < sizeof(*hdr) || + offset > buffer->data_size - sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; @@ -2127,6 +2199,64 @@ static bool binder_validate_fixup(struct binder_buffer *b, return (fixup_offset >= last_min_offset); } +/** + * struct binder_task_work_cb - for deferred close + * + * @twork: callback_head for task work + * @fd: fd to close + * + * Structure to pass task work to be handled after + * returning from binder_ioctl() via task_work_add(). + */ +struct binder_task_work_cb { + struct callback_head twork; + struct file *file; +}; + +/** + * binder_do_fd_close() - close list of file descriptors + * @twork: callback head for task work + * + * It is not safe to call ksys_close() during the binder_ioctl() + * function if there is a chance that binder's own file descriptor + * might be closed. This is to meet the requirements for using + * fdget() (see comments for __fget_light()). Therefore use + * task_work_add() to schedule the close operation once we have + * returned from binder_ioctl(). This function is a callback + * for that mechanism and does the actual ksys_close() on the + * given file descriptor. + */ +static void binder_do_fd_close(struct callback_head *twork) +{ + struct binder_task_work_cb *twcb = container_of(twork, + struct binder_task_work_cb, twork); + + fput(twcb->file); + kfree(twcb); +} + +/** + * binder_deferred_fd_close() - schedule a close for the given file-descriptor + * @fd: file-descriptor to close + * + * See comments in binder_do_fd_close(). This function is used to schedule + * a file-descriptor to be closed after returning from binder_ioctl(). + */ +static void binder_deferred_fd_close(int fd) +{ + struct binder_task_work_cb *twcb; + + twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); + if (!twcb) + return; + init_task_work(&twcb->twork, binder_do_fd_close); + __close_fd_get_file_compat(fd, &twcb->file); + if (twcb->file) + task_work_add(current, &twcb->twork, true); + else + kfree(twcb); +} + static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, binder_size_t *failed_at) @@ -2135,7 +2265,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %p\n", + "%d buffer release %d, size %zd-%zd, failed at %pK\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); @@ -2199,12 +2329,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } break; case BINDER_TYPE_FD: { - struct binder_fd_object *fp = to_binder_fd_object(hdr); - - binder_debug(BINDER_DEBUG_TRANSACTION, - " fd %d\n", fp->fd); - if (failed_at) - task_close_fd(proc, fp->fd); + /* + * No need to close the file here since user-space + * closes it for for successfully delivered + * transactions. 
For transactions that weren't + * delivered, the new fd was never allocated so + * there is no need to close and the fput on the + * file is done when the transaction is torn + * down. + */ + WARN_ON(failed_at && + proc->tsk == current->group_leader); } break; case BINDER_TYPE_PTR: /* @@ -2220,12 +2355,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, size_t fd_index; binder_size_t fd_buf_size; + if (proc->tsk != current->group_leader) { + /* + * Nothing to do if running in sender context + * The fd fixups have not been applied so no + * fds need to be closed. + */ + continue; + } + fda = to_binder_fd_array_object(hdr); parent = binder_validate_ptr(buffer, fda->parent, off_start, offp - off_start); if (!parent) { - pr_err("transaction release %d bad parent offset", + pr_err("transaction release %d bad parent offset\n", debug_id); continue; } @@ -2252,7 +2396,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); for (fd_index = 0; fd_index < fda->num_fds; fd_index++) - task_close_fd(proc, fd_array[fd_index]); + binder_deferred_fd_close(fd_array[fd_index]); } break; default: pr_err("transaction release %d bad object type %x\n", @@ -2347,11 +2491,15 @@ static int binder_translate_handle(struct flat_binder_object *fp, fp->cookie = node->cookie; if (node->proc) binder_inner_proc_lock(node->proc); + else + __acquire(&node->proc->inner_lock); binder_inc_node_nilocked(node, fp->hdr.type == BINDER_TYPE_BINDER, 0, NULL); if (node->proc) binder_inner_proc_unlock(node->proc); + else + __release(&node->proc->inner_lock); trace_binder_transaction_ref_to_node(t, node, &src_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", @@ -2384,17 +2532,18 @@ static int binder_translate_handle(struct flat_binder_object *fp, return ret; } -static int binder_translate_fd(int fd, +static int binder_translate_fd(u32 *fdp, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; - int target_fd; + struct binder_txn_fd_fixup *fixup; struct file *file; - int ret; + int ret = 0; bool target_allows_fd; + int fd = *fdp; if (in_reply_to) target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); @@ -2422,19 +2571,24 @@ static int binder_translate_fd(int fd, goto err_security; } - target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); - if (target_fd < 0) { + /* + * Add fixup record for this transaction. The allocation + * of the fd in the target needs to be done from a + * target thread. 
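
To make the direction of this translation concrete: on the sending side a file descriptor travels as a flat BINDER_TYPE_FD object inside the transaction data, roughly as in the user-space sketch below (types from the binder UAPI header; the numeric fd is only meaningful in the sender, and the fixup recorded here is what later replaces it with a descriptor allocated in the target).

/* Sender-side sketch: the embedded fd is the sender's; the fixup list lets
 * binder_apply_fd_fixups() substitute a target-local fd on delivery. */
#include <linux/android/binder.h>

static void fill_fd_object(struct binder_fd_object *obj, int local_fd)
{
	obj->hdr.type = BINDER_TYPE_FD;
	obj->pad_flags = 0;
	obj->fd = local_fd;	/* rewritten in the target process */
	obj->cookie = 0;
}
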
+ */ + fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); + if (!fixup) { ret = -ENOMEM; - goto err_get_unused_fd; + goto err_alloc; } - task_fd_install(target_proc, target_fd, file); - trace_binder_transaction_fd(t, fd, target_fd); - binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", - fd, target_fd); + fixup->file = file; + fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data; + trace_binder_transaction_fd_send(t, fd, fixup->offset); + list_add_tail(&fixup->fixup_entry, &t->fd_fixups); - return target_fd; + return ret; -err_get_unused_fd: +err_alloc: err_security: fput(file); err_fget: @@ -2448,8 +2602,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, struct binder_thread *thread, struct binder_transaction *in_reply_to) { - binder_size_t fdi, fd_buf_size, num_installed_fds; - int target_fd; + binder_size_t fdi, fd_buf_size; uintptr_t parent_buffer; u32 *fd_array; struct binder_proc *proc = thread->proc; @@ -2481,23 +2634,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, return -EINVAL; } for (fdi = 0; fdi < fda->num_fds; fdi++) { - target_fd = binder_translate_fd(fd_array[fdi], t, thread, + int ret = binder_translate_fd(&fd_array[fdi], t, thread, in_reply_to); - if (target_fd < 0) - goto err_translate_fd_failed; - fd_array[fdi] = target_fd; + if (ret < 0) + return ret; } return 0; - -err_translate_fd_failed: - /* - * Failed to allocate fd or security error, free fds - * installed so far. - */ - num_installed_fds = fdi; - for (fdi = 0; fdi < num_installed_fds; fdi++) - task_close_fd(target_proc, fd_array[fdi]); - return target_fd; } static int binder_fixup_parent(struct binder_transaction *t, @@ -2569,20 +2711,18 @@ static bool binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, struct binder_thread *thread) { - struct list_head *target_list = NULL; struct binder_node *node = t->buffer->target_node; bool oneway = !!(t->flags & TF_ONE_WAY); - bool wakeup = true; + bool pending_async = false; BUG_ON(!node); binder_node_lock(node); if (oneway) { BUG_ON(thread); if (node->has_async_transaction) { - target_list = &node->async_todo; - wakeup = false; + pending_async = true; } else { - node->has_async_transaction = 1; + node->has_async_transaction = true; } } @@ -2594,19 +2734,17 @@ static bool binder_proc_transaction(struct binder_transaction *t, return false; } - if (!thread && !target_list) + if (!thread && !pending_async) thread = binder_select_thread_ilocked(proc); if (thread) - target_list = &thread->todo; - else if (!target_list) - target_list = &proc->todo; + binder_enqueue_thread_work_ilocked(thread, &t->work); + else if (!pending_async) + binder_enqueue_work_ilocked(&t->work, &proc->todo); else - BUG_ON(target_list != &node->async_todo); - - binder_enqueue_work_ilocked(&t->work, target_list); + binder_enqueue_work_ilocked(&t->work, &node->async_todo); - if (wakeup) + if (!pending_async) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); binder_inner_proc_unlock(proc); @@ -2664,6 +2802,7 @@ static void binder_transaction(struct binder_proc *proc, { int ret; struct binder_transaction *t; + struct binder_work *w; struct binder_work *tcomplete; binder_size_t *offp, *off_end, *off_start; binder_size_t off_min; @@ -2680,6 +2819,8 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = atomic_inc_return(&binder_last_id); + char *secctx = NULL; + u32 secctx_sz = 0; e = 
binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -2724,6 +2865,8 @@ static void binder_transaction(struct binder_proc *proc, binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { + /* annotation for sparse */ + __release(&target_thread->proc->inner_lock); return_error = BR_DEAD_REPLY; return_error_line = __LINE__; goto err_dead_binder; @@ -2779,6 +2922,14 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); + if (target_node && target_proc->pid == proc->pid) { + binder_user_error("%d:%d got transaction to context manager from process owning it\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } } if (!target_node) { /* @@ -2797,6 +2948,29 @@ static void binder_transaction(struct binder_proc *proc, goto err_invalid_target_handle; } binder_inner_proc_lock(proc); + + w = list_first_entry_or_null(&thread->todo, + struct binder_work, entry); + if (!(tr->flags & TF_ONE_WAY) && w && + w->type == BINDER_WORK_TRANSACTION) { + /* + * Do not allow new outgoing transaction from a + * thread that has a transaction at the head of + * its todo list. Only need to check the head + * because binder_select_thread_ilocked picks a + * thread from proc->waiting_threads to enqueue + * the transaction, and nothing is queued to the + * todo list while the thread is on waiting_threads. + */ + binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", + proc->pid, thread->pid); + binder_inner_proc_unlock(proc); + return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; + goto err_bad_todo_list; + } + if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; @@ -2844,6 +3018,7 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_alloc_t_failed; } + INIT_LIST_HEAD(&t->fd_fixups); binder_stats_created(BINDER_STAT_TRANSACTION); spin_lock_init(&t->lock); @@ -2888,6 +3063,25 @@ static void binder_transaction(struct binder_proc *proc, t->flags = tr->flags; t->priority = task_nice(current); + +// Remove the security check for kernel under 5.0 +/* + + if (target_node && target_node->txn_security_ctx) { + u32 secid; + + security_task_getsecid(proc->tsk, &secid); + ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_get_secctx_failed; + } + extra_buffers_size += ALIGN(secctx_sz, sizeof(u64)); + } +*/ + trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, @@ -2904,7 +3098,19 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } - t->buffer->allow_user_free = 0; + if (secctx) { + size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + + ALIGN(tr->offsets_size, sizeof(void *)) + + ALIGN(extra_buffers_size, sizeof(void *)) - + ALIGN(secctx_sz, sizeof(u64)); + char *kptr = t->buffer->data + buf_offset; + + t->security_ctx = (uintptr_t)kptr + + binder_alloc_get_user_buffer_offset(&target_proc->alloc); + memcpy(kptr, secctx, secctx_sz); + security_release_secctx(secctx, secctx_sz); + secctx = NULL; + } 
t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; @@ -2999,17 +3205,16 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FD: { struct binder_fd_object *fp = to_binder_fd_object(hdr); - int target_fd = binder_translate_fd(fp->fd, t, thread, - in_reply_to); + int ret = binder_translate_fd(&fp->fd, t, thread, + in_reply_to); - if (target_fd < 0) { + if (ret < 0) { return_error = BR_FAILED_REPLY; - return_error_param = target_fd; + return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } fp->pad_binder = 0; - fp->fd = target_fd; } break; case BINDER_TYPE_FDA: { struct binder_fd_array_object *fda = @@ -3101,10 +3306,10 @@ static void binder_transaction(struct binder_proc *proc, } } tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - binder_enqueue_work(proc, tcomplete, &thread->todo); t->work.type = BINDER_WORK_TRANSACTION; if (reply) { + binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); if (target_thread->is_dead) { binder_inner_proc_unlock(target_proc); @@ -3112,13 +3317,21 @@ static void binder_transaction(struct binder_proc *proc, } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); - binder_enqueue_work_ilocked(&t->work, &target_thread->todo); + binder_enqueue_thread_work_ilocked(target_thread, &t->work); binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); + /* + * Defer the TRANSACTION_COMPLETE, so we don't return to + * userspace immediately; this allows the target process to + * immediately start processing this transaction, reducing + * latency. We will then return the TRANSACTION_COMPLETE when + * the target replies (or there is an error). 
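
One user-visible consequence of this deferral is that a synchronous caller normally finds BR_TRANSACTION_COMPLETE and BR_REPLY in the same read buffer, so its read loop has to consume several return commands per BINDER_WRITE_READ. A simplified sketch of such a loop (UAPI types only, payload handling reduced to the cases shown):

/* Sketch: drain multiple return commands from one binder read buffer. */
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void drain_read_buffer(const unsigned char *buf, size_t consumed)
{
	size_t off = 0;

	while (off < consumed) {
		uint32_t cmd;

		memcpy(&cmd, buf + off, sizeof(cmd));
		off += sizeof(cmd);

		switch (cmd) {
		case BR_NOOP:
		case BR_TRANSACTION_COMPLETE:
			break;			/* no payload */
		case BR_REPLY: {
			struct binder_transaction_data tr;

			memcpy(&tr, buf + off, sizeof(tr));
			off += sizeof(tr);
			/* consume the reply, then BC_FREE_BUFFER its buffer */
			break;
		}
		default:
			return;			/* other commands carry payloads */
		}
	}
}
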
+ */ + binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; @@ -3132,6 +3345,7 @@ static void binder_transaction(struct binder_proc *proc, } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); + binder_enqueue_thread_work(thread, tcomplete); if (!binder_proc_transaction(t, target_proc, NULL)) goto err_dead_proc_or_thread; } @@ -3157,6 +3371,7 @@ static void binder_transaction(struct binder_proc *proc, err_bad_offset: err_bad_parent: err_copy_data_failed: + binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); if (target_node) @@ -3165,12 +3380,18 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: + if (secctx) + security_release_secctx(secctx, secctx_sz); +/* +err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); +*/ err_alloc_tcomplete_failed: kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: +err_bad_todo_list: err_bad_call_stack: err_empty_call_stack: err_dead_binder: @@ -3210,18 +3431,57 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { thread->return_error.cmd = BR_TRANSACTION_COMPLETE; - binder_enqueue_work(thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { thread->return_error.cmd = return_error; - binder_enqueue_work(thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work(thread, &thread->return_error.work); } } +/** + * binder_free_buf() - free the specified buffer + * @proc: binder proc that owns buffer + * @buffer: buffer to be freed + * + * If buffer for an async transaction, enqueue the next async + * transaction from the node. + * + * Cleanup buffer and free it. 
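
The usual trigger for this helper is a BC_FREE_BUFFER command issued by user space once it has consumed a received transaction; the sketch below shows that side of the exchange (UAPI types only, no error handling), and the rewritten BC_FREE_BUFFER handler further down routes it into binder_free_buf().

/* Sketch: return a consumed transaction buffer (tr.data.ptr.buffer from a
 * BR_TRANSACTION/BR_REPLY) to the driver. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void free_binder_buffer(int binder_fd, binder_uintptr_t buffer_ptr)
{
	uint32_t cmd = BC_FREE_BUFFER;
	unsigned char wbuf[sizeof(cmd) + sizeof(buffer_ptr)];
	struct binder_write_read bwr;

	memcpy(wbuf, &cmd, sizeof(cmd));
	memcpy(wbuf + sizeof(cmd), &buffer_ptr, sizeof(buffer_ptr));

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(wbuf);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
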
+ */ +static void +binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) +{ + binder_inner_proc_lock(proc); + if (buffer->transaction) { + buffer->transaction->buffer = NULL; + buffer->transaction = NULL; + } + binder_inner_proc_unlock(proc); + if (buffer->async_transaction && buffer->target_node) { + struct binder_node *buf_node; + struct binder_work *w; + + buf_node = buffer->target_node; + binder_node_inner_lock(buf_node); + BUG_ON(!buf_node->has_async_transaction); + BUG_ON(buf_node->proc != proc); + w = binder_dequeue_work_head_ilocked( + &buf_node->async_todo); + if (!w) { + buf_node->has_async_transaction = false; + } else { + binder_enqueue_work_ilocked( + w, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + binder_node_inner_unlock(buf_node); + } + trace_binder_transaction_buffer_release(buffer); + binder_transaction_buffer_release(proc, buffer, NULL); + binder_alloc_free_buf(&proc->alloc, buffer); +} + static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, @@ -3393,14 +3653,18 @@ static int binder_thread_write(struct binder_proc *proc, buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr); - if (buffer == NULL) { - binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", - proc->pid, thread->pid, (u64)data_ptr); - break; - } - if (!buffer->allow_user_free) { - binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", - proc->pid, thread->pid, (u64)data_ptr); + if (IS_ERR_OR_NULL(buffer)) { + if (PTR_ERR(buffer) == -EPERM) { + binder_user_error( + "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", + proc->pid, thread->pid, + (u64)data_ptr); + } else { + binder_user_error( + "%d:%d BC_FREE_BUFFER u%016llx no match\n", + proc->pid, thread->pid, + (u64)data_ptr); + } break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, @@ -3408,33 +3672,7 @@ static int binder_thread_write(struct binder_proc *proc, proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); - - if (buffer->transaction) { - buffer->transaction->buffer = NULL; - buffer->transaction = NULL; - } - if (buffer->async_transaction && buffer->target_node) { - struct binder_node *buf_node; - struct binder_work *w; - - buf_node = buffer->target_node; - binder_node_inner_lock(buf_node); - BUG_ON(!buf_node->has_async_transaction); - BUG_ON(buf_node->proc != proc); - w = binder_dequeue_work_head_ilocked( - &buf_node->async_todo); - if (!w) { - buf_node->has_async_transaction = 0; - } else { - binder_enqueue_work_ilocked( - w, &proc->todo); - binder_wakeup_proc_ilocked(proc); - } - binder_node_inner_unlock(buf_node); - } - trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, buffer, NULL); - binder_alloc_free_buf(&proc->alloc, buffer); + binder_free_buf(proc, buffer); break; } @@ -3522,10 +3760,9 @@ static int binder_thread_write(struct binder_proc *proc, WARN_ON(thread->return_error.cmd != BR_OK); thread->return_error.cmd = BR_ERROR; - binder_enqueue_work( - thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work( + thread, + &thread->return_error.work); binder_debug( BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", @@ -3605,9 +3842,9 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work_ilocked( - &death->work, - &thread->todo); + binder_enqueue_thread_work_ilocked( + thread, + &death->work); else { binder_enqueue_work_ilocked( &death->work, @@ -3647,7 +3884,7 @@ static int binder_thread_write(struct binder_proc *proc, } } binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", + "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", proc->pid, thread->pid, (u64)cookie, death); if (death == NULL) { @@ -3662,8 +3899,8 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work_ilocked( - &death->work, &thread->todo); + binder_enqueue_thread_work_ilocked( + thread, &death->work); else { binder_enqueue_work_ilocked( &death->work, @@ -3758,6 +3995,76 @@ static int binder_wait_for_work(struct binder_thread *thread, return ret; } +/** + * binder_apply_fd_fixups() - finish fd translation + * @t: binder transaction with list of fd fixups + * + * Now that we are in the context of the transaction target + * process, we can allocate and install fds. Process the + * list of fds to translate and fixup the buffer with the + * new fds. + * + * If we fail to allocate an fd, then free the resources by + * fput'ing files that have not been processed and ksys_close'ing + * any fds that have already been allocated. 
+ */ +static int binder_apply_fd_fixups(struct binder_transaction *t) +{ + struct binder_txn_fd_fixup *fixup, *tmp; + int ret = 0; + + list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { + int fd = get_unused_fd_flags(O_CLOEXEC); + u32 *fdp; + + if (fd < 0) { + binder_debug(BINDER_DEBUG_TRANSACTION, + "failed fd fixup txn %d fd %d\n", + t->debug_id, fd); + ret = -ENOMEM; + break; + } + binder_debug(BINDER_DEBUG_TRANSACTION, + "fd fixup txn %d fd %d\n", + t->debug_id, fd); + trace_binder_transaction_fd_recv(t, fd, fixup->offset); + fd_install(fd, fixup->file); + fixup->file = NULL; + fdp = (u32 *)(t->buffer->data + fixup->offset); + /* + * This store can cause problems for CPUs with a + * VIVT cache (eg ARMv5) since the cache cannot + * detect virtual aliases to the same physical cacheline. + * To support VIVT, this address and the user-space VA + * would both need to be flushed. Since this kernel + * VA is not constructed via page_to_virt(), we can't + * use flush_dcache_page() on it, so we'd have to use + * an internal function. If devices with VIVT ever + * need to run Android, we'll either need to go back + * to patching the translated fd from the sender side + * (using the non-standard kernel functions), or rework + * how the kernel uses the buffer to use page_to_virt() + * addresses instead of allocating in our own vm area. + * + * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT. + */ + *fdp = fd; + } + list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { + if (fixup->file) { + fput(fixup->file); + } else if (ret) { + u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); + + binder_deferred_fd_close(*fdp); + } + list_del(&fixup->fixup_entry); + kfree(fixup); + } + + return ret; +} + static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, @@ -3811,11 +4118,13 @@ static int binder_thread_read(struct binder_proc *proc, while (1) { uint32_t cmd; - struct binder_transaction_data tr; + struct binder_transaction_data_secctx tr; + struct binder_transaction_data *trd = &tr.transaction_data; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; + size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) @@ -3837,6 +4146,8 @@ static int binder_thread_read(struct binder_proc *proc, break; } w = binder_dequeue_work_head_ilocked(list); + if (binder_worklist_empty_ilocked(&thread->todo)) + thread->process_todo = false; switch (w->type) { case BINDER_WORK_TRANSACTION: { @@ -3851,14 +4162,17 @@ static int binder_thread_read(struct binder_proc *proc, binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; + cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, e->cmd); + binder_stat_br(proc, thread, cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; + kfree(w); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); @@ -3867,8 +4181,6 @@ static int binder_thread_read(struct binder_proc *proc, binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); - kfree(w); - binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct 
binder_node, work); @@ -3998,6 +4310,11 @@ static int binder_thread_read(struct binder_proc *proc, if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; + default: + binder_inner_proc_unlock(proc); + pr_err("%d:%d: bad work type %d\n", + proc->pid, thread->pid, w->type); + break; } if (!t) @@ -4007,8 +4324,8 @@ static int binder_thread_read(struct binder_proc *proc, if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; - tr.target.ptr = target_node->ptr; - tr.cookie = target_node->cookie; + trd->target.ptr = target_node->ptr; + trd->cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) @@ -4018,33 +4335,67 @@ static int binder_thread_read(struct binder_proc *proc, binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { - tr.target.ptr = 0; - tr.cookie = 0; + trd->target.ptr = 0; + trd->cookie = 0; cmd = BR_REPLY; } - tr.code = t->code; - tr.flags = t->flags; - tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); + trd->code = t->code; + trd->flags = t->flags; + trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; - tr.sender_pid = task_tgid_nr_ns(sender, - task_active_pid_ns(current)); + trd->sender_pid = + task_tgid_nr_ns(sender, + task_active_pid_ns(current)); } else { - tr.sender_pid = 0; + trd->sender_pid = 0; } - tr.data_size = t->buffer->data_size; - tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (binder_uintptr_t) + ret = binder_apply_fd_fixups(t); + if (ret) { + struct binder_buffer *buffer = t->buffer; + bool oneway = !!(t->flags & TF_ONE_WAY); + int tid = t->debug_id; + + if (t_from) + binder_thread_dec_tmpref(t_from); + buffer->transaction = NULL; + binder_cleanup_transaction(t, "fd fixups failed", + BR_FAILED_REPLY); + binder_free_buf(proc, buffer); + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", + proc->pid, thread->pid, + oneway ? "async " : + (cmd == BR_REPLY ? 
"reply " : ""), + tid, BR_FAILED_REPLY, ret, __LINE__); + if (cmd == BR_REPLY) { + cmd = BR_FAILED_REPLY; + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + binder_stat_br(proc, thread, cmd); + break; + } + continue; + } + trd->data_size = t->buffer->data_size; + trd->offsets_size = t->buffer->offsets_size; + trd->data.ptr.buffer = (binder_uintptr_t) ((uintptr_t)t->buffer->data + binder_alloc_get_user_buffer_offset(&proc->alloc)); - tr.data.ptr.offsets = tr.data.ptr.buffer + + trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); + tr.secctx = t->security_ctx; + if (t->security_ctx) { + cmd = BR_TRANSACTION_SEC_CTX; + trsize = sizeof(tr); + } if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4055,7 +4406,7 @@ static int binder_thread_read(struct binder_proc *proc, return -EFAULT; } ptr += sizeof(uint32_t); - if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (copy_to_user(ptr, &tr, trsize)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4064,7 +4415,7 @@ static int binder_thread_read(struct binder_proc *proc, return -EFAULT; } - ptr += sizeof(tr); + ptr += trsize; trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); @@ -4072,16 +4423,18 @@ static int binder_thread_read(struct binder_proc *proc, "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : - "BR_REPLY", + (cmd == BR_TRANSACTION_SEC_CTX) ? + "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, - (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); + (u64)trd->data.ptr.buffer, + (u64)trd->data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; - if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; @@ -4273,6 +4626,8 @@ static int binder_thread_release(struct binder_proc *proc, spin_lock(&t->lock); if (t->to_thread == thread) send_reply = t; + } else { + __acquire(&t->lock); } thread->is_dead = true; @@ -4301,7 +4656,11 @@ static int binder_thread_release(struct binder_proc *proc, spin_unlock(&last_t->lock); if (t) spin_lock(&t->lock); + else + __acquire(&t->lock); } + /* annotation for sparse, lock not acquired in last iteration above */ + __release(&t->lock); /* * If this thread used poll, make sure we remove the waitqueue @@ -4311,11 +4670,20 @@ static int binder_thread_release(struct binder_proc *proc, */ if ((thread->looper & BINDER_LOOPER_STATE_POLL) && waitqueue_active(&thread->wait)) { - wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); } binder_inner_proc_unlock(thread->proc); + /* + * This is needed to avoid races between wake_up_poll() above and + * and ep_remove_waitqueue() called for other reasons (eg the epoll file + * descriptor being closed); ep_remove_waitqueue() holds an RCU read + * lock, so we can be sure it's done after calling synchronize_rcu(). 
+ */ + if (thread->looper & BINDER_LOOPER_STATE_POLL) + synchronize_rcu(); + if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(proc, &thread->todo); @@ -4323,7 +4691,7 @@ static int binder_thread_release(struct binder_proc *proc, return active_transactions; } -static unsigned int binder_poll(struct file *filp, +static __poll_t binder_poll(struct file *filp, struct poll_table_struct *wait) { struct binder_proc *proc = filp->private_data; @@ -4331,6 +4699,8 @@ static unsigned int binder_poll(struct file *filp, bool wait_for_proc_work; thread = binder_get_thread(proc); + if (!thread) + return POLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -4341,7 +4711,7 @@ static unsigned int binder_poll(struct file *filp, poll_wait(filp, &thread->wait, wait); if (binder_has_work(thread, wait_for_proc_work)) - return POLLIN; + return EPOLLIN; return 0; } @@ -4412,7 +4782,8 @@ static int binder_ioctl_write_read(struct file *filp, return ret; } -static int binder_ioctl_set_ctx_mgr(struct file *filp) +static int binder_ioctl_set_ctx_mgr(struct file *filp, + struct flat_binder_object *fbo) { int ret = 0; struct binder_proc *proc = filp->private_data; @@ -4441,7 +4812,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) } else { context->binder_context_mgr_uid = curr_euid; } - new_node = binder_new_node(proc, NULL); + new_node = binder_new_node(proc, fbo); if (!new_node) { ret = -ENOMEM; goto out; @@ -4459,6 +4830,42 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) return ret; } +static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, + struct binder_node_info_for_ref *info) +{ + struct binder_node *node; + struct binder_context *context = proc->context; + __u32 handle = info->handle; + + if (info->strong_count || info->weak_count || info->reserved1 || + info->reserved2 || info->reserved3) { + binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", + proc->pid); + return -EINVAL; + } + + /* This ioctl may only be used by the context manager */ + mutex_lock(&context->context_mgr_node_lock); + if (!context->binder_context_mgr_node || + context->binder_context_mgr_node->proc != proc) { + mutex_unlock(&context->context_mgr_node_lock); + return -EPERM; + } + mutex_unlock(&context->context_mgr_node_lock); + + node = binder_get_node_from_ref(proc, handle, true, NULL); + if (!node) + return -EINVAL; + + info->strong_count = node->local_strong_refs + + node->internal_strong_refs; + info->weak_count = node->local_weak_refs; + + binder_put_node(node); + + return 0; +} + static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, struct binder_node_debug_info *info) { @@ -4528,8 +4935,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } + case BINDER_SET_CONTEXT_MGR_EXT: { + struct flat_binder_object fbo; + + if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { + ret = -EINVAL; + goto err; + } + ret = binder_ioctl_set_ctx_mgr(filp, &fbo); + if (ret) + goto err; + break; + } case BINDER_SET_CONTEXT_MGR: - ret = binder_ioctl_set_ctx_mgr(filp); + ret = binder_ioctl_set_ctx_mgr(filp, NULL); if (ret) goto err; break; @@ -4553,6 +4972,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; } + case BINDER_GET_NODE_INFO_FOR_REF: { + struct binder_node_info_for_ref info; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + ret = 
binder_ioctl_get_node_info_for_ref(proc, &info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + break; + } case BINDER_GET_NODE_DEBUG_INFO: { struct binder_node_debug_info info; @@ -4608,14 +5046,9 @@ static void binder_vma_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); binder_alloc_vma_close(&proc->alloc); - binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) -static int binder_vm_fault(struct vm_fault *vmf) -#else -static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -#endif +static vm_fault_t binder_vm_fault(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } @@ -4649,20 +5082,19 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) failure_string = "bad vm_flags"; goto err_bad_arg; } - vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; + vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; - mutex_lock(&proc->files_lock); - proc->files = get_files_struct(current); - mutex_unlock(&proc->files_lock); return 0; err_bad_arg: - pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -4672,7 +5104,7 @@ static int binder_open(struct inode *nodp, struct file *filp) struct binder_proc *proc; struct binder_device *binder_dev; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", + binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); @@ -4682,7 +5114,6 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; - mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); proc->default_priority = task_nice(current); /* binderfs stashes devices in i_private */ @@ -4715,10 +5146,10 @@ static int binder_open(struct inode *nodp, struct file *filp) * anyway print all contexts that a given PID has, so this * is not a problem. 
*/ - proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, + proc->debugfs_entry = debugfs_create_file(strbuf, 0444, binder_debugfs_dir_entry_proc, (void *)(unsigned long)proc->pid, - &binder_proc_fops); + &proc_fops); } return 0; @@ -4836,8 +5267,6 @@ static void binder_deferred_release(struct binder_proc *proc) struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; - BUG_ON(proc->files); - mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); mutex_unlock(&binder_procs_lock); @@ -4919,7 +5348,6 @@ static void binder_deferred_release(struct binder_proc *proc) static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; - struct files_struct *files; int defer; @@ -4937,23 +5365,11 @@ static void binder_deferred_func(struct work_struct *work) } mutex_unlock(&binder_deferred_lock); - files = NULL; - if (defer & BINDER_DEFERRED_PUT_FILES) { - mutex_lock(&proc->files_lock); - files = proc->files; - if (files) - proc->files = NULL; - mutex_unlock(&proc->files_lock); - } - if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ - - if (files) - put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); @@ -4982,7 +5398,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, @@ -5006,7 +5422,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %p\n", + seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, buffer->data); } @@ -5157,6 +5573,9 @@ static void print_binder_proc(struct seq_file *m, for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); + if (!print_all && !node->has_async_transaction) + continue; + /* * take a temporary reference on the node so it * survives and isn't removed from the tree @@ -5361,7 +5780,7 @@ static void print_binder_proc_stats(struct seq_file *m, } -static int binder_state_show(struct seq_file *m, void *unused) +static int state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; @@ -5400,7 +5819,7 @@ static int binder_state_show(struct seq_file *m, void *unused) return 0; } -static int binder_stats_show(struct seq_file *m, void *unused) +static int stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -5416,7 +5835,7 @@ static int binder_stats_show(struct seq_file *m, void *unused) return 0; } -static int binder_transactions_show(struct seq_file *m, void *unused) +static int transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -5429,7 +5848,7 @@ static int binder_transactions_show(struct seq_file *m, void *unused) return 0; } -static int binder_proc_show(struct seq_file *m, void *unused) +static int proc_show(struct seq_file *m, void *unused) { struct binder_proc *itr; int pid = (unsigned long)m->private; @@ -5472,7 +5891,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m, "\n" : " (incomplete)\n"); } -static int 
binder_transaction_log_show(struct seq_file *m, void *unused) +static int transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; unsigned int log_cur = atomic_read(&log->cur); @@ -5504,12 +5923,10 @@ const struct file_operations binder_fops = { .release = binder_release, }; -EXPORT_SYMBOL(binder_fops); - -BINDER_DEBUG_ENTRY(state); -BINDER_DEBUG_ENTRY(stats); -BINDER_DEBUG_ENTRY(transactions); -BINDER_DEBUG_ENTRY(transaction_log); +DEFINE_SHOW_ATTRIBUTE(state); +DEFINE_SHOW_ATTRIBUTE(stats); +DEFINE_SHOW_ATTRIBUTE(transactions); +DEFINE_SHOW_ATTRIBUTE(transaction_log); static int __init init_binder_device(const char *name) { @@ -5542,11 +5959,14 @@ static int __init init_binder_device(const char *name) static int __init binder_init(void) { int ret; - char *device_name, *device_names, *device_tmp; + char *device_name, *device_tmp; struct binder_device *device; struct hlist_node *tmp; + char *device_names = NULL; - binder_alloc_shrinker_init(); + ret = binder_alloc_shrinker_init(); + if (ret) + return ret; atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); @@ -5558,50 +5978,55 @@ static int __init binder_init(void) if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, - &binder_state_fops); + &state_fops); debugfs_create_file("stats", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, - &binder_stats_fops); + &stats_fops); debugfs_create_file("transactions", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, - &binder_transactions_fops); + &transactions_fops); debugfs_create_file("transaction_log", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, &binder_transaction_log, - &binder_transaction_log_fops); + &transaction_log_fops); debugfs_create_file("failed_transaction_log", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, - &binder_transaction_log_fops); + &transaction_log_fops); } - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. - */ - device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } - strcpy(device_names, binder_devices_param); + if (strcmp(binder_devices_param, "") != 0) { + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. 
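The _show/_fops renames above line up the functions with what DEFINE_SHOW_ATTRIBUTE() generates. As a rough, paraphrased sketch of that macro from <linux/seq_file.h> (shown for orientation only, not part of the patch), DEFINE_SHOW_ATTRIBUTE(state) produces approximately:

        /* Paraphrased expansion of DEFINE_SHOW_ATTRIBUTE(state), relying on
         * <linux/seq_file.h>; illustration only, not code added by the patch. */
        static int state_open(struct inode *inode, struct file *file)
        {
                return single_open(file, state_show, inode->i_private);
        }

        static const struct file_operations state_fops = {
                .owner          = THIS_MODULE,
                .open           = state_open,
                .read           = seq_read,
                .llseek         = seq_lseek,
                .release        = single_release,
        };

which is why the debugfs registrations above now reference &state_fops, &stats_fops, &transactions_fops and &transaction_log_fops.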
+ */ + device_names = kstrdup(binder_devices_param, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } - device_tmp = device_names; - while ((device_name = strsep(&device_tmp, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } } + ret = init_binderfs(); + if (ret) + goto err_init_binder_device_failed; + return ret; err_init_binder_device_failed: @@ -5619,25 +6044,20 @@ static int __init binder_init(void) return ret; } -static void __exit binder_exit(void) -{ - struct binder_device *device; - struct hlist_node *tmp; - - hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { - misc_deregister(&device->miscdev); - kfree(device->context.name); - hlist_del(&device->hlist); - kfree(device); - } - - debugfs_remove_recursive(binder_debugfs_dir_entry_root); -} - module_init(binder_init); -module_exit(binder_exit); +/* + * binder will have no exit function since binderfs instances can be mounted + * multiple times and also in user namespaces finding and destroying them all + * is not feasible without introducing insane locking. Just ignoring existing + * instances on module unload also wouldn't work since we would loose track of + * what major numer was dynamically allocated and also what minor numbers are + * already given out. So this would get us into all kinds of issues with device + * number reuse. So simply don't allow unloading unless we are forced to do so. + */ + +MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Driver for Android binder device"); +MODULE_LICENSE("GPL v2"); #define CREATE_TRACE_POINTS #include "binder_trace.h" - -MODULE_LICENSE("GPL v2"); diff --git a/binder/binder.h b/binder/binder.h new file mode 100644 index 0000000..35aac58 --- /dev/null +++ b/binder/binder.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2008 Google, Inc. + * + * Based on, but no longer compatible with, the original + * OpenBinder.org binder driver interface, which is: + * + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _UAPI_LINUX_BINDER_H +#define _UAPI_LINUX_BINDER_H + +#include +#include + +#define B_PACK_CHARS(c1, c2, c3, c4) \ + ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) +#define B_TYPE_LARGE 0x85 + +#ifndef EPOLLHUP +#define EPOLLHUP POLLHUP +#endif + +#ifndef EPOLLIN +#define EPOLLIN POLLIN +#endif + +enum { + BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), + BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), + BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), + BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), + BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), + BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), + BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), +}; + +enum { + FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, + FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, + + /** + * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts + * + * Only when set, causes senders to include their security + * context + */ + FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000, +}; + +#ifdef BINDER_IPC_32BIT +typedef __u32 binder_size_t; +typedef __u32 binder_uintptr_t; +#else +typedef __u64 binder_size_t; +typedef __u64 binder_uintptr_t; +#endif + +/** + * struct binder_object_header - header shared by all binder metadata objects. + * @type: type of the object + */ +struct binder_object_header { + __u32 type; +}; + +/* + * This is the flattened representation of a Binder object for transfer + * between processes. The 'offsets' supplied as part of a binder transaction + * contains offsets into the data where these structures occur. The Binder + * driver takes care of re-writing the structure type and data as it moves + * between processes. + */ +struct flat_binder_object { + struct binder_object_header hdr; + __u32 flags; + + /* 8 bytes of data. */ + union { + binder_uintptr_t binder; /* local object */ + __u32 handle; /* remote object */ + }; + + /* extra data associated with local object */ + binder_uintptr_t cookie; +}; + +/** + * struct binder_fd_object - describes a filedescriptor to be fixed up. + * @hdr: common header structure + * @pad_flags: padding to remain compatible with old userspace code + * @pad_binder: padding to remain compatible with old userspace code + * @fd: file descriptor + * @cookie: opaque data, used by user-space + */ +struct binder_fd_object { + struct binder_object_header hdr; + __u32 pad_flags; + union { + binder_uintptr_t pad_binder; + __u32 fd; + }; + + binder_uintptr_t cookie; +}; + +/* struct binder_buffer_object - object describing a userspace buffer + * @hdr: common header structure + * @flags: one or more BINDER_BUFFER_* flags + * @buffer: address of the buffer + * @length: length of the buffer + * @parent: index in offset array pointing to parent buffer + * @parent_offset: offset in @parent pointing to this buffer + * + * A binder_buffer object represents an object that the + * binder kernel driver can copy verbatim to the target + * address space. A buffer itself may be pointed to from + * within another buffer, meaning that the pointer inside + * that other buffer needs to be fixed up as well. This + * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT + * flag in @flags, by setting @parent buffer to the index + * in the offset array pointing to the parent binder_buffer_object, + * and by setting @parent_offset to the offset in the parent buffer + * at which the pointer to this buffer is located. 
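To make the parent fixup just described concrete, here is a minimal sender-side sketch (not part of the patch; struct payload, the helper and the include path are invented for illustration, while the uapi types and flags are the ones defined just below):

        /* Illustrative sketch only -- not part of the patch. */
        #include <stddef.h>
        #include <stdint.h>
        #include "binder.h"

        struct payload {                       /* hypothetical user structure */
                int32_t len;
                const char *blob;              /* pointer the driver must rewrite */
        };

        static void describe_buffers(struct binder_buffer_object bo[2],
                                     const struct payload *p)
        {
                /* entry 0: the payload itself */
                bo[0].hdr.type = BINDER_TYPE_PTR;
                bo[0].flags = 0;
                bo[0].buffer = (binder_uintptr_t)(uintptr_t)p;
                bo[0].length = sizeof(*p);

                /* entry 1: the blob, whose pointer lives inside the payload */
                bo[1].hdr.type = BINDER_TYPE_PTR;
                bo[1].flags = BINDER_BUFFER_FLAG_HAS_PARENT;
                bo[1].buffer = (binder_uintptr_t)(uintptr_t)p->blob;
                bo[1].length = (binder_size_t)p->len;
                bo[1].parent = 0;              /* offsets[] index of entry 0 */
                bo[1].parent_offset = offsetof(struct payload, blob);
        }

The driver only needs @parent and @parent_offset to locate the embedded pointer it has to rewrite after copying both buffers into the target.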
+ */ +struct binder_buffer_object { + struct binder_object_header hdr; + __u32 flags; + binder_uintptr_t buffer; + binder_size_t length; + binder_size_t parent; + binder_size_t parent_offset; +}; + +enum { + BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, +}; + +/* struct binder_fd_array_object - object describing an array of fds in a buffer + * @hdr: common header structure + * @pad: padding to ensure correct alignment + * @num_fds: number of file descriptors in the buffer + * @parent: index in offset array to buffer holding the fd array + * @parent_offset: start offset of fd array in the buffer + * + * A binder_fd_array object represents an array of file + * descriptors embedded in a binder_buffer_object. It is + * different from a regular binder_buffer_object because it + * describes a list of file descriptors to fix up, not an opaque + * blob of memory, and hence the kernel needs to treat it differently. + * + * An example of how this would be used is with Android's + * native_handle_t object, which is a struct with a list of integers + * and a list of file descriptors. The native_handle_t struct itself + * will be represented by a struct binder_buffer_objct, whereas the + * embedded list of file descriptors is represented by a + * struct binder_fd_array_object with that binder_buffer_object as + * a parent. + */ +struct binder_fd_array_object { + struct binder_object_header hdr; + __u32 pad; + binder_size_t num_fds; + binder_size_t parent; + binder_size_t parent_offset; +}; + +/* + * On 64-bit platforms where user code may run in 32-bits the driver must + * translate the buffer (and local binder) addresses appropriately. + */ + +struct binder_write_read { + binder_size_t write_size; /* bytes to write */ + binder_size_t write_consumed; /* bytes consumed by driver */ + binder_uintptr_t write_buffer; + binder_size_t read_size; /* bytes to read */ + binder_size_t read_consumed; /* bytes consumed by driver */ + binder_uintptr_t read_buffer; +}; + +/* Use with BINDER_VERSION, driver fills in fields. */ +struct binder_version { + /* driver protocol version -- increment with incompatible change */ + __s32 protocol_version; +}; + +/* This is the current protocol version. */ +#ifdef BINDER_IPC_32BIT +#define BINDER_CURRENT_PROTOCOL_VERSION 7 +#else +#define BINDER_CURRENT_PROTOCOL_VERSION 8 +#endif + +/* + * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields. + * Set ptr to NULL for the first call to get the info for the first node, and + * then repeat the call passing the previously returned value to get the next + * nodes. ptr will be 0 when there are no more nodes. 
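A minimal userspace sketch of the iteration protocol just described (illustrative only, not part of the patch; the include path is an assumption):

        #include <string.h>
        #include <sys/ioctl.h>
        #include "binder.h"

        static int dump_nodes(int binder_fd)
        {
                struct binder_node_debug_info info;

                memset(&info, 0, sizeof(info));         /* ptr == 0: first node */
                do {
                        if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
                                return -1;
                        if (info.ptr) {
                                /* one node: info.cookie, info.has_strong_ref,
                                 * info.has_weak_ref are now filled in */
                        }
                } while (info.ptr);                     /* ptr == 0: no more nodes */

                return 0;
        }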
+ */ +struct binder_node_debug_info { + binder_uintptr_t ptr; + binder_uintptr_t cookie; + __u32 has_strong_ref; + __u32 has_weak_ref; +}; + +struct binder_node_info_for_ref { + __u32 handle; + __u32 strong_count; + __u32 weak_count; + __u32 reserved1; + __u32 reserved2; + __u32 reserved3; +}; + +#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) +#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) +#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) +#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) +#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) +#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) +#define BINDER_VERSION _IOWR('b', 9, struct binder_version) +#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) +#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) +#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) + +/* + * NOTE: Two special error codes you should check for when calling + * in to the driver are: + * + * EINTR -- The operation has been interupted. This should be + * handled by retrying the ioctl() until a different error code + * is returned. + * + * ECONNREFUSED -- The driver is no longer accepting operations + * from your process. That is, the process is being destroyed. + * You should handle this by exiting from your process. Note + * that once this error code is returned, all further calls to + * the driver from any thread will return this same code. + */ + +enum transaction_flags { + TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ + TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ + TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ + TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ +}; + +struct binder_transaction_data { + /* The first two are only used for bcTRANSACTION and brTRANSACTION, + * identifying the target and contents of the transaction. + */ + union { + /* target descriptor of command transaction */ + __u32 handle; + /* target descriptor of return transaction */ + binder_uintptr_t ptr; + } target; + binder_uintptr_t cookie; /* target object cookie */ + __u32 code; /* transaction command */ + + /* General information about the transaction. */ + __u32 flags; + pid_t sender_pid; + uid_t sender_euid; + binder_size_t data_size; /* number of bytes of data */ + binder_size_t offsets_size; /* number of bytes of offsets */ + + /* If this transaction is inline, the data immediately + * follows here; otherwise, it ends with a pointer to + * the data buffer. + */ + union { + struct { + /* transaction data */ + binder_uintptr_t buffer; + /* offsets from buffer to flat_binder_object structs */ + binder_uintptr_t offsets; + } ptr; + __u8 buf[8]; + } data; +}; + +struct binder_transaction_data_secctx { + struct binder_transaction_data transaction_data; + binder_uintptr_t secctx; +}; + +struct binder_transaction_data_sg { + struct binder_transaction_data transaction_data; + binder_size_t buffers_size; +}; + +struct binder_ptr_cookie { + binder_uintptr_t ptr; + binder_uintptr_t cookie; +}; + +struct binder_handle_cookie { + __u32 handle; + binder_uintptr_t cookie; +} __packed; + +struct binder_pri_desc { + __s32 priority; + __u32 desc; +}; + +struct binder_pri_ptr_cookie { + __s32 priority; + binder_uintptr_t ptr; + binder_uintptr_t cookie; +}; + +enum binder_driver_return_protocol { + BR_ERROR = _IOR('r', 0, __s32), + /* + * int: error code + */ + + BR_OK = _IO('r', 1), + /* No parameters! 
*/ + + BR_TRANSACTION_SEC_CTX = _IOR('r', 2, + struct binder_transaction_data_secctx), + /* + * binder_transaction_data_secctx: the received command. + */ + BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), + BR_REPLY = _IOR('r', 3, struct binder_transaction_data), + /* + * binder_transaction_data: the received command. + */ + + BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), + /* + * not currently supported + * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. + * Else the remote object has acquired a primary reference. + */ + + BR_DEAD_REPLY = _IO('r', 5), + /* + * The target of the last transaction (either a bcTRANSACTION or + * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. + */ + + BR_TRANSACTION_COMPLETE = _IO('r', 6), + /* + * No parameters... always refers to the last transaction requested + * (including replies). Note that this will be sent even for + * asynchronous transactions. + */ + + BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), + BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), + BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), + BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie for binder + */ + + BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), + /* + * not currently supported + * int: priority + * void *: ptr to binder + * void *: cookie for binder + */ + + BR_NOOP = _IO('r', 12), + /* + * No parameters. Do nothing and examine the next command. It exists + * primarily so that we can replace it with a BR_SPAWN_LOOPER command. + */ + + BR_SPAWN_LOOPER = _IO('r', 13), + /* + * No parameters. The driver has determined that a process has no + * threads waiting to service incoming transactions. When a process + * receives this command, it must spawn a new service thread and + * register it via bcENTER_LOOPER. + */ + + BR_FINISHED = _IO('r', 14), + /* + * not currently supported + * stop threadpool thread + */ + + BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), + /* + * void *: cookie + */ + BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), + /* + * void *: cookie + */ + + BR_FAILED_REPLY = _IO('r', 17), + /* + * The the last transaction (either a bcTRANSACTION or + * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. + */ +}; + +enum binder_driver_command_protocol { + BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), + BC_REPLY = _IOW('c', 1, struct binder_transaction_data), + /* + * binder_transaction_data: the sent command. + */ + + BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), + /* + * not currently supported + * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. + * Else you have acquired a primary reference on the object. + */ + + BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), + /* + * void *: ptr to transaction data received on a read + */ + + BC_INCREFS = _IOW('c', 4, __u32), + BC_ACQUIRE = _IOW('c', 5, __u32), + BC_RELEASE = _IOW('c', 6, __u32), + BC_DECREFS = _IOW('c', 7, __u32), + /* + * int: descriptor + */ + + BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), + BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie for binder + */ + + BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), + /* + * not currently supported + * int: priority + * int: descriptor + */ + + BC_REGISTER_LOOPER = _IO('c', 11), + /* + * No parameters. + * Register a spawned looper thread with the device. 
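As an illustrative sketch of the command side (not part of the patch; the helper name and include path are assumptions), a thread spawned in response to BR_SPAWN_LOOPER would register itself by writing BC_REGISTER_LOOPER through BINDER_WRITE_READ:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include "binder.h"

        static int binder_write_cmd(int binder_fd, uint32_t cmd)
        {
                struct binder_write_read bwr = {
                        .write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
                        .write_size   = sizeof(cmd),
                        /* no read side: read_size stays 0 */
                };

                return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
        }

        /* in the newly spawned service thread:
         *      binder_write_cmd(fd, BC_REGISTER_LOOPER);
         */

Threads that the application creates on its own initiative use BC_ENTER_LOOPER instead, as described just below.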
+ */ + + BC_ENTER_LOOPER = _IO('c', 12), + BC_EXIT_LOOPER = _IO('c', 13), + /* + * No parameters. + * These two commands are sent as an application-level thread + * enters and exits the binder loop, respectively. They are + * used so the binder can have an accurate count of the number + * of looping threads it has available. + */ + + BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, + struct binder_handle_cookie), + /* + * int: handle + * void *: cookie + */ + + BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, + struct binder_handle_cookie), + /* + * int: handle + * void *: cookie + */ + + BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), + /* + * void *: cookie + */ + + BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), + BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), + /* + * binder_transaction_data_sg: the sent command. + */ +}; + +#endif /* _UAPI_LINUX_BINDER_H */ + diff --git a/binder/binder_alloc.c b/binder/binder_alloc.c index 5d27553..c4d8b78 100644 --- a/binder/binder_alloc.c +++ b/binder/binder_alloc.c @@ -17,9 +17,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include -#include +#include #include #include #include @@ -28,32 +27,29 @@ #include #include #include -#include +#include +#include #include "binder_alloc.h" #include "binder_trace.h" -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) -#include -#endif - struct list_lru binder_alloc_lru; static DEFINE_MUTEX(binder_alloc_mmap_lock); enum { + BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_OPEN_CLOSE = 1U << 1, BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, }; -static uint32_t binder_alloc_debug_mask; +static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR; -module_param_named(debug_mask, binder_alloc_debug_mask, - uint, 0644); +module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644); #define binder_alloc_debug(mask, x...) \ do { \ if (binder_alloc_debug_mask & mask) \ - pr_info(x); \ + pr_info_ratelimited(x); \ } while (0) static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) @@ -154,14 +150,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( else { /* * Guard against user threads attempting to - * free the buffer twice + * free the buffer when in use by kernel or + * after it's already been freed. 
*/ - if (buffer->free_in_progress) { - pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", - alloc->pid, current->pid, (u64)user_ptr); - return NULL; - } - buffer->free_in_progress = 1; + if (!buffer->allow_user_free) + return ERR_PTR(-EPERM); + buffer->allow_user_free = 0; return buffer; } } @@ -191,12 +185,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end, - struct vm_area_struct *vma) + void *start, void *end) { void *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; + struct vm_area_struct *vma = NULL; struct mm_struct *mm = NULL; bool need_mm = false; @@ -220,17 +214,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } } - if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) + if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) mm = alloc->vma_vm_mm; if (mm) { - down_write(&mm->mmap_sem); + down_read(&mm->mmap_sem); vma = alloc->vma; } if (!vma && need_mm) { - pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - alloc->pid); + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); goto err_no_vma; } @@ -286,11 +281,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, goto err_vm_insert_page_failed; } + if (index + 1 > alloc->pages_high) + alloc->pages_high = index + 1; + trace_binder_alloc_page_end(alloc, index); /* vm_insert_page does not seem to increment the refcount */ } if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return 0; @@ -323,17 +321,47 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } err_no_vma: if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } -struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) + +static inline void binder_alloc_set_vma(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + if (vma) + alloc->vma_vm_mm = vma->vm_mm; + /* + * If we see alloc->vma is not NULL, buffer data structures set up + * completely. Look at smp_rmb side binder_alloc_get_vma. + * We also want to guarantee new alloc->vma_vm_mm is always visible + * if alloc->vma is set. 
+ */ + smp_wmb(); + alloc->vma = vma; +} + +static inline struct vm_area_struct *binder_alloc_get_vma( + struct binder_alloc *alloc) +{ + struct vm_area_struct *vma = NULL; + + if (alloc->vma) { + /* Look at description in binder_alloc_set_vma */ + smp_rmb(); + vma = alloc->vma; + } + return vma; +} + +static struct binder_buffer *binder_alloc_new_buf_locked( + struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -344,9 +372,10 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, size_t size, data_offsets_size; int ret; - if (alloc->vma == NULL) { - pr_err("%d: binder_alloc_buf, no vma\n", - alloc->pid); + if (!binder_alloc_get_vma(alloc)) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: binder_alloc_buf, no vma\n", + alloc->pid); return ERR_PTR(-ESRCH); } @@ -418,11 +447,14 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, if (buffer_size > largest_free_size) largest_free_size = buffer_size; } - pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", - alloc->pid, size); - pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", - total_alloc_size, allocated_buffers, largest_alloc_size, - total_free_size, free_buffers, largest_free_size); + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: binder_alloc_buf size %zd failed, no address space\n", + alloc->pid, size); + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, + largest_alloc_size, total_free_size, + free_buffers, largest_free_size); return ERR_PTR(-ENOSPC); } if (n == NULL) { @@ -442,7 +474,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); if (ret) return ERR_PTR(ret); @@ -463,7 +495,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; - buffer->free_in_progress = 0; + buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %pK\n", @@ -483,7 +515,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, err_alloc_buf_struct_failed: binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - end_page_addr, NULL); + end_page_addr); return ERR_PTR(-ENOMEM); } @@ -567,8 +599,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, alloc->pid, buffer->data, prev->data, next ? 
next->data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE, - NULL); + buffer_start_page(buffer) + PAGE_SIZE); } list_del(&buffer->entry); kfree(buffer); @@ -605,8 +636,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), - NULL); + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -695,8 +725,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, } } #endif - alloc->pages = kzalloc(sizeof(alloc->pages[0]) * - ((vma->vm_end - vma->vm_start) / PAGE_SIZE), + alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, + sizeof(alloc->pages[0]), GFP_KERNEL); if (alloc->pages == NULL) { ret = -ENOMEM; @@ -717,9 +747,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; - barrier(); - alloc->vma = vma; - alloc->vma_vm_mm = vma->vm_mm; + binder_alloc_set_vma(alloc, vma); mmgrab(alloc->vma_vm_mm); return 0; @@ -734,8 +762,10 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); - pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, - alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, + failure_string, ret); return ret; } @@ -746,10 +776,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int buffers, page_count; struct binder_buffer *buffer; - BUG_ON(alloc->vma); - buffers = 0; mutex_lock(&alloc->mutex); + BUG_ON(alloc->vma); + while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -860,6 +890,7 @@ void binder_alloc_print_pages(struct seq_file *m, } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); + seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } /** @@ -891,7 +922,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) */ void binder_alloc_vma_close(struct binder_alloc *alloc) { - WRITE_ONCE(alloc->vma, NULL); + binder_alloc_set_vma(alloc, NULL); } /** @@ -907,6 +938,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg) + __must_hold(lock) { struct mm_struct *mm = NULL; struct binder_lru_page *page = container_of(item, @@ -926,14 +958,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; - vma = alloc->vma; - if (vma) { - if (!mmget_not_zero(alloc->vma_vm_mm)) - goto err_mmget; - mm = alloc->vma_vm_mm; - if (!down_write_trylock(&mm->mmap_sem)) - goto err_down_write_mmap_sem_failed; - } + + mm = alloc->vma_vm_mm; + if (!mmget_not_zero(mm)) + goto err_mmget; + if (!down_write_trylock(&mm->mmap_sem)) + goto err_down_write_mmap_sem_failed; + vma = binder_alloc_get_vma(alloc); list_lru_isolate(lru, item); spin_unlock(lock); @@ -946,10 +977,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); - - up_write(&mm->mmap_sem); - mmput(mm); } + up_write(&mm->mmap_sem); 
+ mmput(mm); trace_binder_unmap_kernel_start(alloc, index); @@ -989,7 +1019,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) return ret; } -struct shrinker binder_shrinker = { +static struct shrinker binder_shrinker = { .count_objects = binder_shrink_count, .scan_objects = binder_shrink_scan, .seeks = DEFAULT_SEEKS, @@ -1009,8 +1039,14 @@ void binder_alloc_init(struct binder_alloc *alloc) INIT_LIST_HEAD(&alloc->buffers); } -void binder_alloc_shrinker_init(void) +int binder_alloc_shrinker_init(void) { - list_lru_init(&binder_alloc_lru); - register_shrinker(&binder_shrinker); + int ret = list_lru_init(&binder_alloc_lru); + + if (ret == 0) { + ret = register_shrinker(&binder_shrinker); + if (ret) + list_lru_destroy(&binder_alloc_lru); + } + return ret; } diff --git a/binder/binder_alloc.h b/binder/binder_alloc.h index 2dd33b6..366c833 100644 --- a/binder/binder_alloc.h +++ b/binder/binder_alloc.h @@ -15,6 +15,7 @@ #ifndef _LINUX_BINDER_ALLOC_H #define _LINUX_BINDER_ALLOC_H +#include #include #include #include @@ -30,16 +31,16 @@ struct binder_transaction; * struct binder_buffer - buffer used for binder transactions * @entry: entry alloc->buffers * @rb_node: node for allocated_buffers/free_buffers rb trees - * @free: true if buffer is free - * @allow_user_free: describe the second member of struct blah, - * @async_transaction: describe the second member of struct blah, - * @debug_id: describe the second member of struct blah, - * @transaction: describe the second member of struct blah, - * @target_node: describe the second member of struct blah, - * @data_size: describe the second member of struct blah, - * @offsets_size: describe the second member of struct blah, - * @extra_buffers_size: describe the second member of struct blah, - * @data:i describe the second member of struct blah, + * @free: %true if buffer is free + * @allow_user_free: %true if user is allowed to free buffer + * @async_transaction: %true if buffer is in use for an async txn + * @debug_id: unique ID for debugging + * @transaction: pointer to associated struct binder_transaction + * @target_node: struct binder_node associated with this buffer + * @data_size: size of @transaction data + * @offsets_size: size of array of offsets + * @extra_buffers_size: size of space for other objects (like sg lists) + * @data: pointer to base of buffer space * * Bookkeeping structure for binder transaction buffers */ @@ -50,8 +51,7 @@ struct binder_buffer { unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned free_in_progress:1; - unsigned debug_id:28; + unsigned debug_id:29; struct binder_transaction *transaction; @@ -92,6 +92,7 @@ struct binder_lru_page { * @pages: array of binder_lru_page * @buffer_size: size of address space specified via mmap * @pid: pid for associated binder_proc (invariant after init) + * @pages_high: high watermark of offset in @pages * * Bookkeeping structure for per-proc address space management for binder * buffers. 
It is normally initialized during binder_init() and binder_mmap() @@ -112,9 +113,10 @@ struct binder_alloc { size_t buffer_size; uint32_t buffer_free; int pid; + size_t pages_high; }; -#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST +#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST) void binder_selftest_alloc(struct binder_alloc *alloc); #else static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} @@ -128,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t extra_buffers_size, int is_async); extern void binder_alloc_init(struct binder_alloc *alloc); -void binder_alloc_shrinker_init(void); +extern int binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/binder/binder_internal.h b/binder/binder_internal.h index 161c993..759bcec 100644 --- a/binder/binder_internal.h +++ b/binder/binder_internal.h @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -37,13 +38,22 @@ struct binder_device { extern const struct file_operations binder_fops; -//#ifdef CONFIG_ANDROID_BINDERFS +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS) extern bool is_binderfs_device(const struct inode *inode); -//#else -//static inline bool is_binderfs_device(const struct inode *inode) -//{ -// return false; -//} -//#endif +#else +static inline bool is_binderfs_device(const struct inode *inode) +{ + return false; +} +#endif + +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS) +extern int __init init_binderfs(void); +#else +static inline int __init init_binderfs(void) +{ + return 0; +} +#endif #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/binder/binder_trace.h b/binder/binder_trace.h index 76e3b9c..14de7ac 100644 --- a/binder/binder_trace.h +++ b/binder/binder_trace.h @@ -223,22 +223,40 @@ TRACE_EVENT(binder_transaction_ref_to_ref, __entry->dest_ref_debug_id, __entry->dest_ref_desc) ); -TRACE_EVENT(binder_transaction_fd, - TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), - TP_ARGS(t, src_fd, dest_fd), +TRACE_EVENT(binder_transaction_fd_send, + TP_PROTO(struct binder_transaction *t, int fd, size_t offset), + TP_ARGS(t, fd, offset), TP_STRUCT__entry( __field(int, debug_id) - __field(int, src_fd) - __field(int, dest_fd) + __field(int, fd) + __field(size_t, offset) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->fd = fd; + __entry->offset = offset; + ), + TP_printk("transaction=%d src_fd=%d offset=%zu", + __entry->debug_id, __entry->fd, __entry->offset) +); + +TRACE_EVENT(binder_transaction_fd_recv, + TP_PROTO(struct binder_transaction *t, int fd, size_t offset), + TP_ARGS(t, fd, offset), + + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, fd) + __field(size_t, offset) ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->src_fd = src_fd; - __entry->dest_fd = dest_fd; + __entry->fd = fd; + __entry->offset = offset; ), - TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", - __entry->debug_id, __entry->src_fd, __entry->dest_fd) + TP_printk("transaction=%d dest_fd=%d offset=%zu", + __entry->debug_id, __entry->fd, __entry->offset) ); DECLARE_EVENT_CLASS(binder_buffer_class, @@ -248,14 +266,17 @@ DECLARE_EVENT_CLASS(binder_buffer_class, __field(int, debug_id) __field(size_t, data_size) __field(size_t, offsets_size) + __field(size_t, extra_buffers_size) ), TP_fast_assign( __entry->debug_id = buf->debug_id; __entry->data_size = buf->data_size; __entry->offsets_size = 
buf->offsets_size; + __entry->extra_buffers_size = buf->extra_buffers_size; ), - TP_printk("transaction=%d data_size=%zd offsets_size=%zd", - __entry->debug_id, __entry->data_size, __entry->offsets_size) + TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd", + __entry->debug_id, __entry->data_size, __entry->offsets_size, + __entry->extra_buffers_size) ); DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, diff --git a/binder/binderfs.c b/binder/binderfs.c index f54f842..dc3e4f5 100644 --- a/binder/binderfs.c +++ b/binder/binderfs.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include #include #include #include @@ -10,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -27,25 +30,42 @@ #include #include #include -#include -#include "binder_ctl.h" #include "binder_internal.h" +#include "binder.h" +#include "binderfs.h" +#include "deps.h" #define FIRST_INODE 1 #define SECOND_INODE 2 #define INODE_OFFSET 3 #define INTSTRLEN 21 #define BINDERFS_MAX_MINOR (1U << MINORBITS) - -#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 - -static struct vfsmount *binderfs_mnt; +/* Ensure that the initial ipc namespace always has devices available. */ +#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) static dev_t binderfs_dev; static DEFINE_MUTEX(binderfs_minors_mutex); static DEFINE_IDA(binderfs_minors); +/** + * binderfs_mount_opts - mount options for binderfs + * @max: maximum number of allocatable binderfs binder devices + */ +struct binderfs_mount_opts { + int max; +}; + +enum { + Opt_max, + Opt_err +}; + +static const match_table_t tokens = { + { Opt_max, "max=%d" }, + { Opt_err, NULL } +}; + /** * binderfs_info - information about a binderfs mount * @ipc_ns: The ipc namespace the binderfs mount belongs to. @@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors); * created. * @root_gid: gid that needs to be used when a new binder device is * created. + * @mount_opts: The mount options in use. + * @device_count: The current number of allocated binder devices. */ struct binderfs_info { struct ipc_namespace *ipc_ns; struct dentry *control_dentry; kuid_t root_uid; kgid_t root_gid; - + struct binderfs_mount_opts mount_opts; + int device_count; }; static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) @@ -69,7 +92,6 @@ static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) return inode->i_sb->s_fs_info; } -/* bool is_binderfs_device(const struct inode *inode) { if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) @@ -77,7 +99,7 @@ bool is_binderfs_device(const struct inode *inode) return false; } -*/ + /** * binderfs_binder_device_create - allocate inode from super block of a * binderfs mount @@ -85,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode) * @userp: buffer to copy information about new device for userspace to * @req: struct binderfs_device as copied from userspace * - * This function allocated a new binder_device and reserves a new minor + * This function allocates a new binder_device and reserves a new minor * number for it. * Minor numbers are limited and tracked globally in binderfs_minors. 
The * function will stash a struct binder_device for the specific binder @@ -101,21 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode, struct binderfs_device *req) { int minor, ret; - struct dentry *dentry, *dup, *root; + struct dentry *dentry, *root; struct binder_device *device; - size_t name_len = BINDERFS_MAX_NAME + 1; char *name = NULL; + size_t name_len; struct inode *inode = NULL; struct super_block *sb = ref_inode->i_sb; struct binderfs_info *info = sb->s_fs_info; +#if defined(CONFIG_IPC_NS) + bool use_reserve = (info->ipc_ns == show_init_ipc_ns_compat()); +#else + bool use_reserve = true; +#endif /* Reserve new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); - minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); - mutex_unlock(&binderfs_minors_mutex); - if (minor < 0) + if (++info->device_count <= info->mount_opts.max) + minor = ida_alloc_max_compat(&binderfs_minors, + use_reserve ? BINDERFS_MAX_MINOR : + BINDERFS_MAX_MINOR_CAPPED, + GFP_KERNEL); + else + minor = -ENOSPC; + if (minor < 0) { + --info->device_count; + mutex_unlock(&binderfs_minors_mutex); return minor; + } + mutex_unlock(&binderfs_minors_mutex); ret = -ENOMEM; device = kzalloc(sizeof(*device), GFP_KERNEL); @@ -134,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode, inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; - name = kmalloc(name_len, GFP_KERNEL); + req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ + name_len = strlen(req->name); + /* Make sure to include terminating NUL byte */ + name = kmemdup(req->name, name_len + 1, GFP_KERNEL); if (!name) goto err; - strscpy(name, req->name, name_len); - device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -158,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode, root = sb->s_root; inode_lock(d_inode(root)); - dentry = d_alloc_name(root, name); - if (!dentry) { + + /* look it up */ + dentry = lookup_one_len(name, root, name_len); + if (IS_ERR(dentry)) { inode_unlock(d_inode(root)); - ret = -ENOMEM; + ret = PTR_ERR(dentry); goto err; } - /* Verify that the name userspace gave us is not already in use. 
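The request handled above originates from an ioctl on the binder-control node. A minimal userspace sketch of that side (illustrative only, not part of the patch; the mount point and include path are assumptions, while BINDER_CTL_ADD and struct binderfs_device come from the binderfs uapi header carried by this patch):

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include "binderfs.h"

        static int add_binder_device(const char *name)
        {
                struct binderfs_device req;
                int fd, ret;

                memset(&req, 0, sizeof(req));
                strncpy(req.name, name, sizeof(req.name) - 1);  /* e.g. "my-binder" */

                fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
                if (fd < 0)
                        return -1;

                ret = ioctl(fd, BINDER_CTL_ADD, &req);
                close(fd);
                if (ret < 0)
                        return -1;

                /* on success the kernel filled in the allocated device numbers */
                printf("%s created as %u:%u\n", req.name, req.major, req.minor);
                return 0;
        }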
*/ - dup = d_lookup(root, &dentry->d_name); - if (dup) { - if (d_really_is_positive(dup)) { - dput(dup); - dput(dentry); - inode_unlock(d_inode(root)); - ret = -EEXIST; - goto err; - } - dput(dup); + if (d_really_is_positive(dentry)) { + /* already exists */ + dput(dentry); + inode_unlock(d_inode(root)); + ret = -EEXIST; + goto err; } inode->i_private = device; - d_add(dentry, inode); + d_instantiate(dentry, inode); fsnotify_create(root->d_inode, dentry); inode_unlock(d_inode(root)); @@ -189,8 +222,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode, kfree(name); kfree(device); mutex_lock(&binderfs_minors_mutex); - //ida_free(&binderfs_minors, minor); - ida_simple_remove(&binderfs_minors, minor); + --info->device_count; + ida_free_compat(&binderfs_minors, minor); mutex_unlock(&binderfs_minors_mutex); iput(inode); @@ -235,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, static void binderfs_evict_inode(struct inode *inode) { struct binder_device *device = inode->i_private; + struct binderfs_info *info = BINDERFS_I(inode); clear_inode(inode); @@ -242,51 +276,95 @@ static void binderfs_evict_inode(struct inode *inode) return; mutex_lock(&binderfs_minors_mutex); - //ida_free(&binderfs_minors, device->miscdev.minor); - ida_simple_remove(&binderfs_minors, device->miscdev.minor); + --info->device_count; + ida_free_compat(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); kfree(device->context.name); kfree(device); } +/** + * binderfs_parse_mount_opts - parse binderfs mount options + * @data: options to set (can be NULL in which case defaults are used) + */ +static int binderfs_parse_mount_opts(char *data, + struct binderfs_mount_opts *opts) +{ + char *p; + opts->max = BINDERFS_MAX_MINOR; + + while ((p = strsep(&data, ",")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + int token; + int max_devices; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_max: + if (match_int(&args[0], &max_devices) || + (max_devices < 0 || + (max_devices > BINDERFS_MAX_MINOR))) + return -EINVAL; + + opts->max = max_devices; + break; + default: + pr_err("Invalid mount options\n"); + return -EINVAL; + } + } + + return 0; +} + +static int binderfs_remount(struct super_block *sb, int *flags, char *data) +{ + struct binderfs_info *info = sb->s_fs_info; + return binderfs_parse_mount_opts(data, &info->mount_opts); +} + +static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root) +{ + struct binderfs_info *info; + + info = root->d_sb->s_fs_info; + if (info->mount_opts.max <= BINDERFS_MAX_MINOR) + seq_printf(seq, ",max=%d", info->mount_opts.max); + + return 0; +} + static const struct super_operations binderfs_super_ops = { - .statfs = simple_statfs, - .evict_inode = binderfs_evict_inode, + .evict_inode = binderfs_evict_inode, + .remount_fs = binderfs_remount, + .show_options = binderfs_show_mount_opts, + .statfs = simple_statfs, }; +static inline bool is_binderfs_control_device(const struct dentry *dentry) +{ + struct binderfs_info *info = dentry->d_sb->s_fs_info; + return info->control_dentry == dentry; +} + static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - struct inode *inode = d_inode(old_dentry); - - /* binderfs doesn't support directories. 
*/ - if (d_is_dir(old_dentry)) + if (is_binderfs_control_device(old_dentry) || + is_binderfs_control_device(new_dentry)) return -EPERM; - if (flags & ~RENAME_NOREPLACE) - return -EINVAL; - - if (!simple_empty(new_dentry)) - return -ENOTEMPTY; - - if (d_really_is_positive(new_dentry)) - simple_unlink(new_dir, new_dentry); - - old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = - new_dir->i_mtime = inode->i_ctime = current_time(old_dir); - - return 0; + return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } static int binderfs_unlink(struct inode *dir, struct dentry *dentry) { - /* - * The control dentry is only ever touched during mount so checking it - * here should not require us to take lock. - */ - if (BINDERFS_I(dir)->control_dentry == dentry) + if (is_binderfs_control_device(dentry)) return -EPERM; return simple_unlink(dir, dentry); @@ -317,13 +395,16 @@ static int binderfs_binder_ctl_create(struct super_block *sb) struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; +#if defined(CONFIG_IPC_NS) + bool use_reserve = (info->ipc_ns == show_init_ipc_ns_compat()); +#else + bool use_reserve = true; +#endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return -ENOMEM; - inode_lock(d_inode(root)); - /* If we have already created a binder-control node, return. */ if (info->control_dentry) { ret = 0; @@ -337,8 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); - minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_alloc_max_compat(&binderfs_minors, + use_reserve ? 
BINDERFS_MAX_MINOR : + BINDERFS_MAX_MINOR_CAPPED, + GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; @@ -363,12 +446,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_private = device; info->control_dentry = dentry; d_add(dentry, inode); - inode_unlock(d_inode(root)); return 0; out: - inode_unlock(d_inode(root)); kfree(device); iput(inode); @@ -383,12 +464,9 @@ static const struct inode_operations binderfs_dir_inode_operations = { static int binderfs_fill_super(struct super_block *sb, void *data, int silent) { + int ret; struct binderfs_info *info; - int ret = -ENOMEM; struct inode *inode = NULL; - struct ipc_namespace *ipc_ns = sb->s_fs_info; - - get_ipc_ns(ipc_ns); sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; @@ -410,11 +488,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_op = &binderfs_super_ops; sb->s_time_gran = 1; - info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); - if (!info) - goto err_without_dentry; + sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); + if (!sb->s_fs_info) + return -ENOMEM; + info = sb->s_fs_info; + + info->ipc_ns = get_ipc_ns_exported_compat(current->nsproxy->ipc_ns); + + ret = binderfs_parse_mount_opts(data, &info->mount_opts); + if (ret) + return ret; - info->ipc_ns = ipc_ns; info->root_gid = make_kgid(sb->s_user_ns, 0); if (!gid_valid(info->root_gid)) info->root_gid = GLOBAL_ROOT_GID; @@ -422,11 +506,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) if (!uid_valid(info->root_uid)) info->root_uid = GLOBAL_ROOT_UID; - sb->s_fs_info = info; - inode = new_inode(sb); if (!inode) - goto err_without_dentry; + return -ENOMEM; inode->i_ino = FIRST_INODE; inode->i_fop = &simple_dir_operations; @@ -437,79 +519,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_root = d_make_root(inode); if (!sb->s_root) - goto err_without_dentry; - - ret = binderfs_binder_ctl_create(sb); - if (ret) - goto err_with_dentry; - - return 0; - -err_with_dentry: - dput(sb->s_root); - sb->s_root = NULL; - -err_without_dentry: - put_ipc_ns(ipc_ns); - iput(inode); - kfree(info); - - return ret; -} - -static int binderfs_test_super(struct super_block *sb, void *data) -{ - struct binderfs_info *info = sb->s_fs_info; - - if (info) - return info->ipc_ns == data; - - return 0; -} + return -ENOMEM; -static int binderfs_set_super(struct super_block *sb, void *data) -{ - sb->s_fs_info = data; - return set_anon_super(sb, NULL); + return binderfs_binder_ctl_create(sb); } static struct dentry *binderfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - struct super_block *sb; - struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - - if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); - - sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super, - flags, ipc_ns->user_ns, ipc_ns); - if (IS_ERR(sb)) - return ERR_CAST(sb); - - if (!sb->s_root) { - int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 
1 : 0); - if (ret) { - deactivate_locked_super(sb); - return ERR_PTR(ret); - } - - sb->s_flags |= SB_ACTIVE; - } - - return dget(sb->s_root); + return mount_nodev(fs_type, flags, data, binderfs_fill_super); } static void binderfs_kill_super(struct super_block *sb) { struct binderfs_info *info = sb->s_fs_info; + kill_litter_super(sb); + if (info && info->ipc_ns) put_ipc_ns(info->ipc_ns); kfree(info); - kill_litter_super(sb); } static struct file_system_type binder_fs_type = { @@ -519,7 +550,7 @@ static struct file_system_type binder_fs_type = { .fs_flags = FS_USERNS_MOUNT, }; -static int __init init_binderfs(void) +int __init init_binderfs(void) { int ret; @@ -535,16 +566,5 @@ static int __init init_binderfs(void) return ret; } - binderfs_mnt = kern_mount(&binder_fs_type); - if (IS_ERR(binderfs_mnt)) { - ret = PTR_ERR(binderfs_mnt); - binderfs_mnt = NULL; - unregister_filesystem(&binder_fs_type); - unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); - } - return ret; } - -module_init(init_binderfs); -MODULE_LICENSE("GPL v2"); diff --git a/binder/binder_ctl.h b/binder/binderfs.h similarity index 77% rename from binder/binder_ctl.h rename to binder/binderfs.h index 65b2efd..29edbad 100644 --- a/binder/binder_ctl.h +++ b/binder/binderfs.h @@ -4,15 +4,15 @@ * */ -#ifndef _UAPI_LINUX_BINDER_CTL_H -#define _UAPI_LINUX_BINDER_CTL_H +#ifndef _UAPI_LINUX_BINDERFS_H +#define _UAPI_LINUX_BINDERFS_H -#include #include #include +#include "binder.h" #define BINDERFS_MAX_NAME 255 - +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 /** * struct binderfs_device - retrieve information about a new binder device * @name: the name to use for the new binderfs binder device @@ -22,8 +22,8 @@ */ struct binderfs_device { char name[BINDERFS_MAX_NAME + 1]; - __u8 major; - __u8 minor; + __u32 major; + __u32 minor; }; /** @@ -31,5 +31,5 @@ struct binderfs_device { */ #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) -#endif /* _UAPI_LINUX_BINDER_CTL_H */ +#endif /* _UAPI_LINUX_BINDERFS_H */ diff --git a/binder/deps.c b/binder/deps.c index b1a108c..dfc14a6 100644 --- a/binder/deps.c +++ b/binder/deps.c @@ -1,15 +1,4 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 +#include "deps.h" static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) @@ -31,6 +20,8 @@ static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, stru static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL; static void (*mmput_async_ptr)(struct mm_struct *) = NULL; static void (*put_ipc_ns_ptr)(struct ipc_namespace *) = NULL; +static int (*task_work_add_ptr)(struct task_struct *, struct callback_head *, bool) = NULL; +static struct ipc_namespace *(*show_init_ipc_ns_ptr)(void) = NULL; struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { @@ -145,17 +136,115 @@ void mmput_async(struct mm_struct *mm) mmput_async_ptr(mm); } -bool is_binderfs_device(const struct inode *inode) +void put_ipc_ns(struct ipc_namespace *ns) { - if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) - return true; + if (!put_ipc_ns_ptr) + put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns"); + if(put_ipc_ns_ptr) + put_ipc_ns_ptr(ns); +} - return false; +int task_work_add(struct task_struct *task, struct callback_head *twork, bool notify) +{ + if(!task_work_add_ptr) + task_work_add_ptr = 
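For context on how the header above (binderfs.h at this point in the series) is consumed, the binder-control node exposed at the root of a binderfs mount is driven from userspace with the BINDER_CTL_ADD ioctl. A small illustrative sketch; the /dev/binderfs mount point and the device name are assumptions, not part of this patch:

/* userspace sketch: ask binder-control to create a new binder device */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "binderfs.h"   /* struct binderfs_device, BINDER_CTL_ADD from this patch */

int main(void)
{
	struct binderfs_device dev;
	int fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);

	if (fd < 0) {
		perror("open binder-control");
		return 1;
	}

	memset(&dev, 0, sizeof(dev));
	strcpy(dev.name, "my-binder");   /* at most BINDERFS_MAX_NAME bytes */

	if (ioctl(fd, BINDER_CTL_ADD, &dev) < 0) {
		perror("BINDER_CTL_ADD");
		close(fd);
		return 1;
	}

	/* on success the kernel has filled in the allocated device numbers */
	printf("created binder device %s (%u:%u)\n", dev.name, dev.major, dev.minor);
	close(fd);
	return 0;
}

The new node then appears next to binder-control in the same mount and can be opened like any other binder device.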
kallsyms_lookup_name("task_work_add"); + if(task_work_add_ptr) + return task_work_add_ptr(task, twork, notify); + else + return -1; } -void put_ipc_ns(struct ipc_namespace *ns) +struct ipc_namespace *show_init_ipc_ns_compat(void) { - if (!put_ipc_ns_ptr) - put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns"); - put_ipc_ns_ptr(ns); + if(!show_init_ipc_ns_ptr) + show_init_ipc_ns_ptr = kallsyms_lookup_name("show_init_ipc_ns"); + if(show_init_ipc_ns_ptr) + return show_init_ipc_ns_ptr(); + else + return NULL; +} + +static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) +{ + __clear_bit(fd, fdt->open_fds); + __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); +} + +static void __put_unused_fd(struct files_struct *files, unsigned int fd) +{ + struct fdtable *fdt = files_fdtable(files); + __clear_open_fd(fd, fdt); + if (fd < files->next_fd) + files->next_fd = fd; +} + +static int close_fd_get_file_backport(unsigned int fd, struct file **res) +{ + struct files_struct *files = current->files; + struct file *file; + struct fdtable *fdt; + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + if (fd >= fdt->max_fds) + goto out_unlock; + file = fdt->fd[fd]; + if (!file) + goto out_unlock; + rcu_assign_pointer(fdt->fd[fd], NULL); + __put_unused_fd(files, fd); + spin_unlock(&files->file_lock); + get_file(file); + *res = file; + return filp_close(file, files); + +out_unlock: + spin_unlock(&files->file_lock); + *res = NULL; + return -ENOENT; +} + +int __close_fd_get_file_compat(unsigned int fd, struct file **res) +{ + int (*close_fd_get_file_ptr)(unsigned int fd, struct file **res) = NULL; + + close_fd_get_file_ptr = kallsyms_lookup_name("__close_fd_get_file"); + if(close_fd_get_file_ptr) + return close_fd_get_file_ptr(fd, res); + else + return close_fd_get_file_backport(fd, res); +} + +struct ipc_namespace *get_ipc_ns_exported_compat(struct ipc_namespace *ns) +{ + struct ipc_namespace *(*get_ipc_ns_exported_ptr)(struct ipc_namespace *ns) = NULL; + + get_ipc_ns_exported_ptr = kallsyms_lookup_name("get_ipc_ns_exported"); + if(get_ipc_ns_exported_ptr) + return get_ipc_ns_exported_ptr(ns); + else + return get_ipc_ns(ns); +} + +int ida_alloc_max_compat(struct ida *ida, unsigned int max, gfp_t gfp) +{ + int (*ida_alloc_max_ptr)(struct ida *, unsigned int, gfp_t) = NULL; + + ida_alloc_max_ptr = kallsyms_lookup_name("ida_alloc_max"); + if(ida_alloc_max_ptr) + return ida_alloc_max_ptr(ida, max, gfp); + else + return ida_simple_get(ida, 0, max, gfp); + +} + +void ida_free_compat(struct ida *ida, unsigned int id) +{ + void (*ida_free_ptr)(struct ida *, unsigned int) = NULL; + ida_free_ptr = kallsyms_lookup_name("ida_free"); + + if(ida_free_ptr) + return ida_free_ptr(ida, id); + else + return ida_simple_remove(ida, id); } diff --git a/binder/deps.h b/binder/deps.h new file mode 100644 index 0000000..2665d3e --- /dev/null +++ b/binder/deps.h @@ -0,0 +1,43 @@ +#ifndef _DEPS_H +#define _DEPS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) + void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size); +#else + void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details); +#endif +int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); +struct files_struct 
*get_files_struct(struct task_struct *task); +void put_files_struct(struct files_struct *files); +struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags); +int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags); +void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); +int __close_fd(struct files_struct *files, unsigned int fd); +int can_nice(const struct task_struct *p, const int nice); +int security_binder_set_context_mgr(struct task_struct *mgr); +int security_binder_transaction(struct task_struct *from, struct task_struct *to); +int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to); +int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file); +void mmput_async(struct mm_struct *mm); +void put_ipc_ns(struct ipc_namespace *ns); +int task_work_add(struct task_struct *task, struct callback_head *twork, bool notify); +int __close_fd_get_file_compat(unsigned int fd, struct file **res); +struct ipc_namespace *show_init_ipc_ns_compat(void); +struct ipc_namespace *get_ipc_ns_exported_compat(struct ipc_namespace *ns); +int ida_alloc_max_compat(struct ida *ida, unsigned int max, gfp_t gfp); +void ida_free_compat(struct ida *, unsigned int id); + +#endif From 02a3f554f5bf7db45c057772a377c9d468436cb4 Mon Sep 17 00:00:00 2001 From: Fengqian Gao Date: Tue, 10 Dec 2019 01:02:33 -0500 Subject: [PATCH 6/8] Fall back to older binderfs driver New binderfs driver which ported from Ubuntu 5.0 kernel cause high CPU usage of audio server. Fall back to older version of binderfs which ported from kernel 4.21 Plus: [WA]Binderfs compile error on kernel higher than 5.3.0 Tracked-On: OAM-88787 --- binder/Makefile | 14 +- binder/binder.c | 1128 +++++++++------------------ binder/binder.h | 504 ------------ binder/binder_alloc.c | 178 ++--- binder/binder_alloc.h | 30 +- binder/{binderfs.h => binder_ctl.h} | 14 +- binder/binder_internal.h | 24 +- binder/binder_trace.h | 43 +- binder/binderfs.c | 330 ++++---- binder/deps.c | 129 +-- binder/deps.h | 43 - 11 files changed, 655 insertions(+), 1782 deletions(-) delete mode 100644 binder/binder.h rename binder/{binderfs.h => binder_ctl.h} (77%) delete mode 100644 binder/deps.h diff --git a/binder/Makefile b/binder/Makefile index 0126060..b4ac7d8 100644 --- a/binder/Makefile +++ b/binder/Makefile @@ -1,7 +1,7 @@ -ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"\"" -DCONFIG_ANDROID_BINDERFS - -obj-m := binder_linux.o -binder_linux-y := binder.o binder_alloc.o deps.o binderfs.o +ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"hostbinder,hostvndbinder,hosthwbinder\"" -DCONFIG_ANDROID_BINDERFS +obj-m := binderfs_module.o binder_module.o +binder_module-y := deps.o binder.o binder_alloc.o +binderfs_module-y := deps.o binderfs.o KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build @@ -9,7 +9,9 @@ all: $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD install: - cp binder_linux.ko $(DESTDIR)/ - insmod binder_linux.ko + cp binder_module.ko $(DESTDIR)/ + cp binderfs_module.ko $(DESTDIR)/ + insmod binder_module.ko + insmod binderfs_module.ko clean: rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions *.ur-safe diff --git a/binder/binder.c b/binder/binder.c index 89d715a..caa9e9e 100644 --- a/binder/binder.c +++ b/binder/binder.c @@ -51,6 +51,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 
+#include #include #include #include @@ -70,38 +71,22 @@ #include #include #include -#include -#include #include -#include -#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#include +#include +#endif -#include "binder.h" +#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT +#define BINDER_IPC_32BIT 1 +#endif + +#include #include "binder_alloc.h" #include "binder_internal.h" #include "binder_trace.h" -#include "deps.h" - -#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 16, 0) -typedef unsigned __bitwise __poll_t; -typedef int vm_fault_t; - -#define DEFINE_SHOW_ATTRIBUTE(__name) \ -static int __name ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __name ## _show, inode->i_private); \ -} \ - \ -static const struct file_operations __name ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __name ## _open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -#endif static HLIST_HEAD(binder_deferred_list); static DEFINE_MUTEX(binder_deferred_lock); @@ -117,8 +102,22 @@ static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; static atomic_t binder_last_id; -static int proc_show(struct seq_file *m, void *unused); -DEFINE_SHOW_ATTRIBUTE(proc); +#define BINDER_DEBUG_ENTRY(name) \ +static int binder_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, binder_##name##_show, inode->i_private); \ +} \ +\ +static const struct file_operations binder_##name##_fops = { \ + .owner = THIS_MODULE, \ + .open = binder_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +static int binder_proc_show(struct seq_file *m, void *unused); +BINDER_DEBUG_ENTRY(proc); /* This is only defined in include/asm-arm/sizes.h */ #ifndef SZ_1K @@ -150,9 +149,10 @@ enum { }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; -module_param_named(debug_mask, binder_debug_mask, uint, 0644); +//yangbin tmp comments because it will cause crash when we insmod +//module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); -static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; +static char *binder_devices_param = (strcmp(CONFIG_ANDROID_BINDER_DEVICES, "")) ? CONFIG_ANDROID_BINDER_DEVICES : "hostbinder,hostvndbinder,hosthwbinder"; module_param_named(devices, binder_devices_param, charp, 0444); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); @@ -174,18 +174,18 @@ static int binder_set_stop_on_user_error(const char *val, return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, - param_get_int, &binder_stop_on_user_error, 0644); + param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); #define binder_debug(mask, x...) \ do { \ if (binder_debug_mask & mask) \ - pr_info_ratelimited(x); \ + pr_info(x); \ } while (0) #define binder_user_error(x...) 
\ do { \ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ - pr_info_ratelimited(x); \ + pr_info(x); \ if (binder_stop_on_user_error) \ binder_stop_on_user_error = 2; \ } while (0) @@ -263,7 +263,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( unsigned int cur = atomic_inc_return(&log->cur); if (cur >= ARRAY_SIZE(log->entry)) - log->full = true; + log->full = 1; e = &log->entry[cur % ARRAY_SIZE(log->entry)]; WRITE_ONCE(e->debug_id_done, 0); /* @@ -355,8 +355,6 @@ struct binder_error { * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) - * @txn_security_ctx: require sender's security context - * (invariant after initialized) * @async_todo: list of async work items * (protected by @proc->inner_lock) * @@ -393,7 +391,6 @@ struct binder_node { * invariant after initialization */ u8 accept_fds:1; - u8 txn_security_ctx:1; u8 min_priority; }; bool has_async_transaction; @@ -461,8 +458,9 @@ struct binder_ref { }; enum binder_deferred_state { - BINDER_DEFERRED_FLUSH = 0x01, - BINDER_DEFERRED_RELEASE = 0x02, + BINDER_DEFERRED_PUT_FILES = 0x01, + BINDER_DEFERRED_FLUSH = 0x02, + BINDER_DEFERRED_RELEASE = 0x04, }; /** @@ -483,6 +481,9 @@ enum binder_deferred_state { * (invariant after initialized) * @tsk task_struct for group_leader of process * (invariant after initialized) + * @files files_struct for process + * (protected by @files_lock) + * @files_lock mutex to protect @files * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -492,6 +493,8 @@ enum binder_deferred_state { * (protected by @inner_lock) * @todo: list of work for this process * (protected by @inner_lock) + * @wait: wait queue head to wait for proc work + * (invariant after initialized) * @stats: per-process binder statistics * (atomics, no lock needed) * @delivered_death: list of delivered death notification @@ -527,11 +530,14 @@ struct binder_proc { struct list_head waiting_threads; int pid; struct task_struct *tsk; + struct files_struct *files; + struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; struct list_head todo; + wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; @@ -573,8 +579,6 @@ enum { * (protected by @proc->inner_lock) * @todo: list of work to do for this thread * (protected by @proc->inner_lock) - * @process_todo: whether work in @todo should be processed - * (protected by @proc->inner_lock) * @return_error: transaction errors reported by this thread * (only accessed by this thread) * @reply_error: transaction errors reported by target thread @@ -600,7 +604,6 @@ struct binder_thread { bool looper_need_return; /* can be written by other thread */ struct binder_transaction *transaction_stack; struct list_head todo; - bool process_todo; struct binder_error return_error; struct binder_error reply_error; wait_queue_head_t wait; @@ -609,23 +612,6 @@ struct binder_thread { bool is_dead; }; -/** - * struct binder_txn_fd_fixup - transaction fd fixup list element - * @fixup_entry: list entry - * @file: struct file to be associated with new fd - * @offset: offset in buffer data to this fixup - * - * List element for fd fixups in a transaction. Since file - * descriptors need to be allocated in the context of the - * target process, we pass each fd to be processed in this - * struct. 
- */ -struct binder_txn_fd_fixup { - struct list_head fixup_entry; - struct file *file; - size_t offset; -}; - struct binder_transaction { int debug_id; struct binder_work work; @@ -643,8 +629,6 @@ struct binder_transaction { long priority; long saved_priority; kuid_t sender_euid; - struct list_head fd_fixups; - binder_uintptr_t security_ctx; /** * @lock: protects @from, @to_proc, and @to_thread * @@ -664,7 +648,6 @@ struct binder_transaction { #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) static void _binder_proc_lock(struct binder_proc *proc, int line) - __acquires(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -680,7 +663,6 @@ _binder_proc_lock(struct binder_proc *proc, int line) #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) static void _binder_proc_unlock(struct binder_proc *proc, int line) - __releases(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -696,7 +678,6 @@ _binder_proc_unlock(struct binder_proc *proc, int line) #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) static void _binder_inner_proc_lock(struct binder_proc *proc, int line) - __acquires(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -712,7 +693,6 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line) #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) static void _binder_inner_proc_unlock(struct binder_proc *proc, int line) - __releases(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -728,7 +708,6 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line) #define binder_node_lock(node) _binder_node_lock(node, __LINE__) static void _binder_node_lock(struct binder_node *node, int line) - __acquires(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -744,7 +723,6 @@ _binder_node_lock(struct binder_node *node, int line) #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) static void _binder_node_unlock(struct binder_node *node, int line) - __releases(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); @@ -761,16 +739,12 @@ _binder_node_unlock(struct binder_node *node, int line) #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) static void _binder_node_inner_lock(struct binder_node *node, int line) - __acquires(&node->lock) __acquires(&node->proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&node->lock); if (node->proc) binder_inner_proc_lock(node->proc); - else - /* annotation for sparse */ - __acquire(&node->proc->inner_lock); } /** @@ -782,7 +756,6 @@ _binder_node_inner_lock(struct binder_node *node, int line) #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) static void _binder_node_inner_unlock(struct binder_node *node, int line) - __releases(&node->lock) __releases(&node->proc->inner_lock) { struct binder_proc *proc = node->proc; @@ -790,9 +763,6 @@ _binder_node_inner_unlock(struct binder_node *node, int line) "%s: line=%d\n", __func__, line); if (proc) binder_inner_proc_unlock(proc); - else - /* annotation for sparse */ - __release(&node->proc->inner_lock); spin_unlock(&node->lock); } @@ -819,16 +789,6 @@ static bool binder_worklist_empty(struct binder_proc *proc, return ret; } -/** - * binder_enqueue_work_ilocked() - Add 
an item to the work list - * @work: struct binder_work to add to list - * @target_list: list to add work to - * - * Adds the work to the specified list. Asserts that work - * is not already on a list. - * - * Requires the proc->inner_lock to be held. - */ static void binder_enqueue_work_ilocked(struct binder_work *work, struct list_head *target_list) @@ -839,58 +799,22 @@ binder_enqueue_work_ilocked(struct binder_work *work, } /** - * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work - * @thread: thread to queue work to - * @work: struct binder_work to add to list - * - * Adds the work to the todo list of the thread. Doesn't set the process_todo - * flag, which means that (if it wasn't already set) the thread will go to - * sleep without handling this work when it calls read. - * - * Requires the proc->inner_lock to be held. - */ -static void -binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, - struct binder_work *work) -{ - WARN_ON(!list_empty(&thread->waiting_thread_node)); - binder_enqueue_work_ilocked(work, &thread->todo); -} - -/** - * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list - * @thread: thread to queue work to - * @work: struct binder_work to add to list - * - * Adds the work to the todo list of the thread, and enables processing - * of the todo queue. - * - * Requires the proc->inner_lock to be held. - */ -static void -binder_enqueue_thread_work_ilocked(struct binder_thread *thread, - struct binder_work *work) -{ - WARN_ON(!list_empty(&thread->waiting_thread_node)); - binder_enqueue_work_ilocked(work, &thread->todo); - thread->process_todo = true; -} - -/** - * binder_enqueue_thread_work() - Add an item to the thread work list - * @thread: thread to queue work to + * binder_enqueue_work() - Add an item to the work list + * @proc: binder_proc associated with list * @work: struct binder_work to add to list + * @target_list: list to add work to * - * Adds the work to the todo list of the thread, and enables processing - * of the todo queue. + * Adds the work to the specified list. Asserts that work + * is not already on a list. 
*/ static void -binder_enqueue_thread_work(struct binder_thread *thread, - struct binder_work *work) +binder_enqueue_work(struct binder_proc *proc, + struct binder_work *work, + struct list_head *target_list) { - binder_inner_proc_lock(thread->proc); - binder_enqueue_thread_work_ilocked(thread, work); - binder_inner_proc_unlock(thread->proc); + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked(work, target_list); + binder_inner_proc_unlock(proc); } static void @@ -953,10 +877,70 @@ static void binder_free_thread(struct binder_thread *thread); static void binder_free_proc(struct binder_proc *proc); static void binder_inc_node_tmpref_ilocked(struct binder_node *node); +static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) +{ + unsigned long rlim_cur; + unsigned long irqs; + int ret; + + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + ret = -ESRCH; + goto err; + } + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } + rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); + unlock_task_sighand(proc->tsk, &irqs); + + ret = __alloc_fd(proc->files, 0, rlim_cur, flags); +err: + mutex_unlock(&proc->files_lock); + return ret; +} + +/* + * copied from fd_install + */ +static void task_fd_install( + struct binder_proc *proc, unsigned int fd, struct file *file) +{ + mutex_lock(&proc->files_lock); + if (proc->files) + __fd_install(proc->files, fd, file); + mutex_unlock(&proc->files_lock); +} + +/* + * copied from sys_close + */ +static long task_close_fd(struct binder_proc *proc, unsigned int fd) +{ + int retval; + + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + retval = -ESRCH; + goto err; + } + retval = __close_fd(proc->files, fd); + /* can't restart close syscall because file table entry was cleared */ + if (unlikely(retval == -ERESTARTSYS || + retval == -ERESTARTNOINTR || + retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK)) + retval = -EINTR; +err: + mutex_unlock(&proc->files_lock); + return retval; +} + static bool binder_has_work_ilocked(struct binder_thread *thread, bool do_proc_work) { - return thread->process_todo || + return !binder_worklist_empty_ilocked(&thread->todo) || thread->looper_need_return || (do_proc_work && !binder_worklist_empty_ilocked(&thread->proc->todo)); @@ -1182,7 +1166,6 @@ static struct binder_node *binder_init_node_ilocked( node->work.type = BINDER_WORK_NODE; node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); - node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -1244,12 +1227,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { - struct binder_thread *thread = container_of(target_list, - struct binder_thread, todo); binder_dequeue_work_ilocked(&node->work); - BUG_ON(&thread->todo != target_list); - binder_enqueue_deferred_thread_work_ilocked(thread, - &node->work); + binder_enqueue_work_ilocked(&node->work, target_list); } } else { if (!internal) @@ -1260,9 +1239,6 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, node->debug_id); return -EINVAL; } - /* - * See comment above - */ binder_enqueue_work_ilocked(&node->work, target_list); } } @@ -1403,14 +1379,10 @@ static void binder_dec_node_tmpref(struct binder_node *node) binder_node_inner_lock(node); if 
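Taken together, the helpers above restore the 4.19-era model in which the receiving process's file descriptor table is manipulated directly from the sender's context. A condensed sketch of how they combine; binder_translate_fd() further below adds the security_binder_transfer_file() check and error reporting, and the function name here is purely illustrative:

static int install_fd_in_target(struct binder_proc *target_proc, int fd)
{
	struct file *file = fget(fd);   /* take a reference on the sender's file */
	int target_fd;

	if (!file)
		return -EBADF;

	/* reserve a slot in the target's files_struct, bounded by its RLIMIT_NOFILE */
	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		fput(file);
		return target_fd;
	}

	/* publish the file in the target's descriptor table; the reference moves with it */
	task_fd_install(target_proc, target_fd, file);
	return target_fd;
}

On a later failure the descriptor is torn down again with task_close_fd(), which is also what the buffer-release path uses for fds that were already installed.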
(!node->proc) spin_lock(&binder_dead_nodes_lock); - else - __acquire(&binder_dead_nodes_lock); node->tmp_refs--; BUG_ON(node->tmp_refs < 0); if (!node->proc) spin_unlock(&binder_dead_nodes_lock); - else - __release(&binder_dead_nodes_lock); /* * Call binder_dec_node() to check if all refcounts are 0 * and cleanup is needed. Calling with strong=0 and internal=1 @@ -1913,62 +1885,26 @@ static struct binder_thread *binder_get_txn_from( */ static struct binder_thread *binder_get_txn_from_and_acq_inner( struct binder_transaction *t) - __acquires(&t->from->proc->inner_lock) { struct binder_thread *from; from = binder_get_txn_from(t); - if (!from) { - __acquire(&from->proc->inner_lock); + if (!from) return NULL; - } binder_inner_proc_lock(from->proc); if (t->from) { BUG_ON(from != t->from); return from; } binder_inner_proc_unlock(from->proc); - __acquire(&from->proc->inner_lock); binder_thread_dec_tmpref(from); return NULL; } -/** - * binder_free_txn_fixups() - free unprocessed fd fixups - * @t: binder transaction for t->from - * - * If the transaction is being torn down prior to being - * processed by the target process, free all of the - * fd fixups and fput the file structs. It is safe to - * call this function after the fixups have been - * processed -- in that case, the list will be empty. - */ -static void binder_free_txn_fixups(struct binder_transaction *t) -{ - struct binder_txn_fd_fixup *fixup, *tmp; - - list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { - fput(fixup->file); - list_del(&fixup->fixup_entry); - kfree(fixup); - } -} - static void binder_free_transaction(struct binder_transaction *t) { - struct binder_proc *target_proc = t->to_proc; - - if (target_proc) { - binder_inner_proc_lock(target_proc); - if (t->buffer) - t->buffer->transaction = NULL; - binder_inner_proc_unlock(target_proc); - } - /* - * If the transaction has no target_proc, then - * t->buffer->transaction has already been cleared. - */ - binder_free_txn_fixups(t); + if (t->buffer) + t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } @@ -1992,26 +1928,18 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_pop_transaction_ilocked(target_thread, t); if (target_thread->reply_error.cmd == BR_OK) { target_thread->reply_error.cmd = error_code; - binder_enqueue_thread_work_ilocked( - target_thread, - &target_thread->reply_error.work); + binder_enqueue_work_ilocked( + &target_thread->reply_error.work, + &target_thread->todo); wake_up_interruptible(&target_thread->wait); } else { - /* - * Cannot get here for normal operation, but - * we can if multiple synchronous transactions - * are sent without blocking for responses. - * Just ignore the 2nd error in this case. 
- */ - pr_warn("Unexpected reply error: %u\n", - target_thread->reply_error.cmd); + WARN(1, "Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); binder_free_transaction(t); return; - } else { - __release(&target_thread->proc->inner_lock); } next = t->from_parent; @@ -2066,8 +1994,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) struct binder_object_header *hdr; size_t object_size = 0; - if (buffer->data_size < sizeof(*hdr) || - offset > buffer->data_size - sizeof(*hdr) || + if (offset > buffer->data_size - sizeof(*hdr) || + buffer->data_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; @@ -2199,64 +2127,6 @@ static bool binder_validate_fixup(struct binder_buffer *b, return (fixup_offset >= last_min_offset); } -/** - * struct binder_task_work_cb - for deferred close - * - * @twork: callback_head for task work - * @fd: fd to close - * - * Structure to pass task work to be handled after - * returning from binder_ioctl() via task_work_add(). - */ -struct binder_task_work_cb { - struct callback_head twork; - struct file *file; -}; - -/** - * binder_do_fd_close() - close list of file descriptors - * @twork: callback head for task work - * - * It is not safe to call ksys_close() during the binder_ioctl() - * function if there is a chance that binder's own file descriptor - * might be closed. This is to meet the requirements for using - * fdget() (see comments for __fget_light()). Therefore use - * task_work_add() to schedule the close operation once we have - * returned from binder_ioctl(). This function is a callback - * for that mechanism and does the actual ksys_close() on the - * given file descriptor. - */ -static void binder_do_fd_close(struct callback_head *twork) -{ - struct binder_task_work_cb *twcb = container_of(twork, - struct binder_task_work_cb, twork); - - fput(twcb->file); - kfree(twcb); -} - -/** - * binder_deferred_fd_close() - schedule a close for the given file-descriptor - * @fd: file-descriptor to close - * - * See comments in binder_do_fd_close(). This function is used to schedule - * a file-descriptor to be closed after returning from binder_ioctl(). - */ -static void binder_deferred_fd_close(int fd) -{ - struct binder_task_work_cb *twcb; - - twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); - if (!twcb) - return; - init_task_work(&twcb->twork, binder_do_fd_close); - __close_fd_get_file_compat(fd, &twcb->file); - if (twcb->file) - task_work_add(current, &twcb->twork, true); - else - kfree(twcb); -} - static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, binder_size_t *failed_at) @@ -2265,7 +2135,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %pK\n", + "%d buffer release %d, size %zd-%zd, failed at %p\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); @@ -2329,17 +2199,12 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } break; case BINDER_TYPE_FD: { - /* - * No need to close the file here since user-space - * closes it for for successfully delivered - * transactions. For transactions that weren't - * delivered, the new fd was never allocated so - * there is no need to close and the fput on the - * file is done when the transaction is torn - * down. 
- */ - WARN_ON(failed_at && - proc->tsk == current->group_leader); + struct binder_fd_object *fp = to_binder_fd_object(hdr); + + binder_debug(BINDER_DEBUG_TRANSACTION, + " fd %d\n", fp->fd); + if (failed_at) + task_close_fd(proc, fp->fd); } break; case BINDER_TYPE_PTR: /* @@ -2355,21 +2220,12 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, size_t fd_index; binder_size_t fd_buf_size; - if (proc->tsk != current->group_leader) { - /* - * Nothing to do if running in sender context - * The fd fixups have not been applied so no - * fds need to be closed. - */ - continue; - } - fda = to_binder_fd_array_object(hdr); parent = binder_validate_ptr(buffer, fda->parent, off_start, offp - off_start); if (!parent) { - pr_err("transaction release %d bad parent offset\n", + pr_err("transaction release %d bad parent offset", debug_id); continue; } @@ -2396,7 +2252,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); for (fd_index = 0; fd_index < fda->num_fds; fd_index++) - binder_deferred_fd_close(fd_array[fd_index]); + task_close_fd(proc, fd_array[fd_index]); } break; default: pr_err("transaction release %d bad object type %x\n", @@ -2491,15 +2347,11 @@ static int binder_translate_handle(struct flat_binder_object *fp, fp->cookie = node->cookie; if (node->proc) binder_inner_proc_lock(node->proc); - else - __acquire(&node->proc->inner_lock); binder_inc_node_nilocked(node, fp->hdr.type == BINDER_TYPE_BINDER, 0, NULL); if (node->proc) binder_inner_proc_unlock(node->proc); - else - __release(&node->proc->inner_lock); trace_binder_transaction_ref_to_node(t, node, &src_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", @@ -2532,18 +2384,17 @@ static int binder_translate_handle(struct flat_binder_object *fp, return ret; } -static int binder_translate_fd(u32 *fdp, +static int binder_translate_fd(int fd, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; - struct binder_txn_fd_fixup *fixup; + int target_fd; struct file *file; - int ret = 0; + int ret; bool target_allows_fd; - int fd = *fdp; if (in_reply_to) target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); @@ -2571,24 +2422,19 @@ static int binder_translate_fd(u32 *fdp, goto err_security; } - /* - * Add fixup record for this transaction. The allocation - * of the fd in the target needs to be done from a - * target thread. 
- */ - fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); - if (!fixup) { + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); + if (target_fd < 0) { ret = -ENOMEM; - goto err_alloc; + goto err_get_unused_fd; } - fixup->file = file; - fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data; - trace_binder_transaction_fd_send(t, fd, fixup->offset); - list_add_tail(&fixup->fixup_entry, &t->fd_fixups); + task_fd_install(target_proc, target_fd, file); + trace_binder_transaction_fd(t, fd, target_fd); + binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", + fd, target_fd); - return ret; + return target_fd; -err_alloc: +err_get_unused_fd: err_security: fput(file); err_fget: @@ -2602,7 +2448,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, struct binder_thread *thread, struct binder_transaction *in_reply_to) { - binder_size_t fdi, fd_buf_size; + binder_size_t fdi, fd_buf_size, num_installed_fds; + int target_fd; uintptr_t parent_buffer; u32 *fd_array; struct binder_proc *proc = thread->proc; @@ -2634,12 +2481,23 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, return -EINVAL; } for (fdi = 0; fdi < fda->num_fds; fdi++) { - int ret = binder_translate_fd(&fd_array[fdi], t, thread, + target_fd = binder_translate_fd(fd_array[fdi], t, thread, in_reply_to); - if (ret < 0) - return ret; + if (target_fd < 0) + goto err_translate_fd_failed; + fd_array[fdi] = target_fd; } return 0; + +err_translate_fd_failed: + /* + * Failed to allocate fd or security error, free fds + * installed so far. + */ + num_installed_fds = fdi; + for (fdi = 0; fdi < num_installed_fds; fdi++) + task_close_fd(target_proc, fd_array[fdi]); + return target_fd; } static int binder_fixup_parent(struct binder_transaction *t, @@ -2711,18 +2569,20 @@ static bool binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, struct binder_thread *thread) { + struct list_head *target_list = NULL; struct binder_node *node = t->buffer->target_node; bool oneway = !!(t->flags & TF_ONE_WAY); - bool pending_async = false; + bool wakeup = true; BUG_ON(!node); binder_node_lock(node); if (oneway) { BUG_ON(thread); if (node->has_async_transaction) { - pending_async = true; + target_list = &node->async_todo; + wakeup = false; } else { - node->has_async_transaction = true; + node->has_async_transaction = 1; } } @@ -2734,17 +2594,19 @@ static bool binder_proc_transaction(struct binder_transaction *t, return false; } - if (!thread && !pending_async) + if (!thread && !target_list) thread = binder_select_thread_ilocked(proc); if (thread) - binder_enqueue_thread_work_ilocked(thread, &t->work); - else if (!pending_async) - binder_enqueue_work_ilocked(&t->work, &proc->todo); + target_list = &thread->todo; + else if (!target_list) + target_list = &proc->todo; else - binder_enqueue_work_ilocked(&t->work, &node->async_todo); + BUG_ON(target_list != &node->async_todo); + + binder_enqueue_work_ilocked(&t->work, target_list); - if (!pending_async) + if (wakeup) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); binder_inner_proc_unlock(proc); @@ -2802,7 +2664,6 @@ static void binder_transaction(struct binder_proc *proc, { int ret; struct binder_transaction *t; - struct binder_work *w; struct binder_work *tcomplete; binder_size_t *offp, *off_end, *off_start; binder_size_t off_min; @@ -2819,8 +2680,6 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = 
atomic_inc_return(&binder_last_id); - char *secctx = NULL; - u32 secctx_sz = 0; e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -2865,8 +2724,6 @@ static void binder_transaction(struct binder_proc *proc, binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { - /* annotation for sparse */ - __release(&target_thread->proc->inner_lock); return_error = BR_DEAD_REPLY; return_error_line = __LINE__; goto err_dead_binder; @@ -2922,14 +2779,6 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); - if (target_node && target_proc->pid == proc->pid) { - binder_user_error("%d:%d got transaction to context manager from process owning it\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -EINVAL; - return_error_line = __LINE__; - goto err_invalid_target_handle; - } } if (!target_node) { /* @@ -2948,29 +2797,6 @@ static void binder_transaction(struct binder_proc *proc, goto err_invalid_target_handle; } binder_inner_proc_lock(proc); - - w = list_first_entry_or_null(&thread->todo, - struct binder_work, entry); - if (!(tr->flags & TF_ONE_WAY) && w && - w->type == BINDER_WORK_TRANSACTION) { - /* - * Do not allow new outgoing transaction from a - * thread that has a transaction at the head of - * its todo list. Only need to check the head - * because binder_select_thread_ilocked picks a - * thread from proc->waiting_threads to enqueue - * the transaction, and nothing is queued to the - * todo list while the thread is on waiting_threads. - */ - binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", - proc->pid, thread->pid); - binder_inner_proc_unlock(proc); - return_error = BR_FAILED_REPLY; - return_error_param = -EPROTO; - return_error_line = __LINE__; - goto err_bad_todo_list; - } - if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; @@ -3018,7 +2844,6 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_alloc_t_failed; } - INIT_LIST_HEAD(&t->fd_fixups); binder_stats_created(BINDER_STAT_TRANSACTION); spin_lock_init(&t->lock); @@ -3063,25 +2888,6 @@ static void binder_transaction(struct binder_proc *proc, t->flags = tr->flags; t->priority = task_nice(current); - -// Remove the security check for kernel under 5.0 -/* - - if (target_node && target_node->txn_security_ctx) { - u32 secid; - - security_task_getsecid(proc->tsk, &secid); - ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); - if (ret) { - return_error = BR_FAILED_REPLY; - return_error_param = ret; - return_error_line = __LINE__; - goto err_get_secctx_failed; - } - extra_buffers_size += ALIGN(secctx_sz, sizeof(u64)); - } -*/ - trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, @@ -3098,19 +2904,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } - if (secctx) { - size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + - ALIGN(tr->offsets_size, sizeof(void *)) + - ALIGN(extra_buffers_size, sizeof(void *)) - - ALIGN(secctx_sz, sizeof(u64)); - char *kptr = t->buffer->data + buf_offset; - - t->security_ctx = (uintptr_t)kptr + - binder_alloc_get_user_buffer_offset(&target_proc->alloc); - memcpy(kptr, secctx, secctx_sz); - 
security_release_secctx(secctx, secctx_sz); - secctx = NULL; - } + t->buffer->allow_user_free = 0; t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; @@ -3205,16 +2999,17 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FD: { struct binder_fd_object *fp = to_binder_fd_object(hdr); - int ret = binder_translate_fd(&fp->fd, t, thread, - in_reply_to); + int target_fd = binder_translate_fd(fp->fd, t, thread, + in_reply_to); - if (ret < 0) { + if (target_fd < 0) { return_error = BR_FAILED_REPLY; - return_error_param = ret; + return_error_param = target_fd; return_error_line = __LINE__; goto err_translate_failed; } fp->pad_binder = 0; + fp->fd = target_fd; } break; case BINDER_TYPE_FDA: { struct binder_fd_array_object *fda = @@ -3306,10 +3101,10 @@ static void binder_transaction(struct binder_proc *proc, } } tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + binder_enqueue_work(proc, tcomplete, &thread->todo); t->work.type = BINDER_WORK_TRANSACTION; if (reply) { - binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); if (target_thread->is_dead) { binder_inner_proc_unlock(target_proc); @@ -3317,21 +3112,13 @@ static void binder_transaction(struct binder_proc *proc, } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); - binder_enqueue_thread_work_ilocked(target_thread, &t->work); + binder_enqueue_work_ilocked(&t->work, &target_thread->todo); binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); - /* - * Defer the TRANSACTION_COMPLETE, so we don't return to - * userspace immediately; this allows the target process to - * immediately start processing this transaction, reducing - * latency. We will then return the TRANSACTION_COMPLETE when - * the target replies (or there is an error). 
- */ - binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; @@ -3345,7 +3132,6 @@ static void binder_transaction(struct binder_proc *proc, } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); - binder_enqueue_thread_work(thread, tcomplete); if (!binder_proc_transaction(t, target_proc, NULL)) goto err_dead_proc_or_thread; } @@ -3371,7 +3157,6 @@ static void binder_transaction(struct binder_proc *proc, err_bad_offset: err_bad_parent: err_copy_data_failed: - binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); if (target_node) @@ -3380,18 +3165,12 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: - if (secctx) - security_release_secctx(secctx, secctx_sz); -/* -err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); -*/ err_alloc_tcomplete_failed: kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: -err_bad_todo_list: err_bad_call_stack: err_empty_call_stack: err_dead_binder: @@ -3431,57 +3210,18 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { thread->return_error.cmd = BR_TRANSACTION_COMPLETE; - binder_enqueue_thread_work(thread, &thread->return_error.work); + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); binder_send_failed_reply(in_reply_to, return_error); } else { thread->return_error.cmd = return_error; - binder_enqueue_thread_work(thread, &thread->return_error.work); + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); } } -/** - * binder_free_buf() - free the specified buffer - * @proc: binder proc that owns buffer - * @buffer: buffer to be freed - * - * If buffer for an async transaction, enqueue the next async - * transaction from the node. - * - * Cleanup buffer and free it. 
- */ -static void -binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) -{ - binder_inner_proc_lock(proc); - if (buffer->transaction) { - buffer->transaction->buffer = NULL; - buffer->transaction = NULL; - } - binder_inner_proc_unlock(proc); - if (buffer->async_transaction && buffer->target_node) { - struct binder_node *buf_node; - struct binder_work *w; - - buf_node = buffer->target_node; - binder_node_inner_lock(buf_node); - BUG_ON(!buf_node->has_async_transaction); - BUG_ON(buf_node->proc != proc); - w = binder_dequeue_work_head_ilocked( - &buf_node->async_todo); - if (!w) { - buf_node->has_async_transaction = false; - } else { - binder_enqueue_work_ilocked( - w, &proc->todo); - binder_wakeup_proc_ilocked(proc); - } - binder_node_inner_unlock(buf_node); - } - trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, buffer, NULL); - binder_alloc_free_buf(&proc->alloc, buffer); -} - static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, @@ -3653,18 +3393,14 @@ static int binder_thread_write(struct binder_proc *proc, buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr); - if (IS_ERR_OR_NULL(buffer)) { - if (PTR_ERR(buffer) == -EPERM) { - binder_user_error( - "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", - proc->pid, thread->pid, - (u64)data_ptr); - } else { - binder_user_error( - "%d:%d BC_FREE_BUFFER u%016llx no match\n", - proc->pid, thread->pid, - (u64)data_ptr); - } + if (buffer == NULL) { + binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", + proc->pid, thread->pid, (u64)data_ptr); + break; + } + if (!buffer->allow_user_free) { + binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", + proc->pid, thread->pid, (u64)data_ptr); break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, @@ -3672,7 +3408,33 @@ static int binder_thread_write(struct binder_proc *proc, proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); - binder_free_buf(proc, buffer); + + if (buffer->transaction) { + buffer->transaction->buffer = NULL; + buffer->transaction = NULL; + } + if (buffer->async_transaction && buffer->target_node) { + struct binder_node *buf_node; + struct binder_work *w; + + buf_node = buffer->target_node; + binder_node_inner_lock(buf_node); + BUG_ON(!buf_node->has_async_transaction); + BUG_ON(buf_node->proc != proc); + w = binder_dequeue_work_head_ilocked( + &buf_node->async_todo); + if (!w) { + buf_node->has_async_transaction = 0; + } else { + binder_enqueue_work_ilocked( + w, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } + binder_node_inner_unlock(buf_node); + } + trace_binder_transaction_buffer_release(buffer); + binder_transaction_buffer_release(proc, buffer, NULL); + binder_alloc_free_buf(&proc->alloc, buffer); break; } @@ -3760,9 +3522,10 @@ static int binder_thread_write(struct binder_proc *proc, WARN_ON(thread->return_error.cmd != BR_OK); thread->return_error.cmd = BR_ERROR; - binder_enqueue_thread_work( - thread, - &thread->return_error.work); + binder_enqueue_work( + thread->proc, + &thread->return_error.work, + &thread->todo); binder_debug( BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", @@ -3842,9 +3605,9 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_thread_work_ilocked( - thread, - &death->work); + binder_enqueue_work_ilocked( + &death->work, + &thread->todo); else { binder_enqueue_work_ilocked( &death->work, @@ -3884,7 +3647,7 @@ static int binder_thread_write(struct binder_proc *proc, } } binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", + "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", proc->pid, thread->pid, (u64)cookie, death); if (death == NULL) { @@ -3899,8 +3662,8 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_thread_work_ilocked( - thread, &death->work); + binder_enqueue_work_ilocked( + &death->work, &thread->todo); else { binder_enqueue_work_ilocked( &death->work, @@ -3995,76 +3758,6 @@ static int binder_wait_for_work(struct binder_thread *thread, return ret; } -/** - * binder_apply_fd_fixups() - finish fd translation - * @t: binder transaction with list of fd fixups - * - * Now that we are in the context of the transaction target - * process, we can allocate and install fds. Process the - * list of fds to translate and fixup the buffer with the - * new fds. - * - * If we fail to allocate an fd, then free the resources by - * fput'ing files that have not been processed and ksys_close'ing - * any fds that have already been allocated. 
- */ -static int binder_apply_fd_fixups(struct binder_transaction *t) -{ - struct binder_txn_fd_fixup *fixup, *tmp; - int ret = 0; - - list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { - int fd = get_unused_fd_flags(O_CLOEXEC); - u32 *fdp; - - if (fd < 0) { - binder_debug(BINDER_DEBUG_TRANSACTION, - "failed fd fixup txn %d fd %d\n", - t->debug_id, fd); - ret = -ENOMEM; - break; - } - binder_debug(BINDER_DEBUG_TRANSACTION, - "fd fixup txn %d fd %d\n", - t->debug_id, fd); - trace_binder_transaction_fd_recv(t, fd, fixup->offset); - fd_install(fd, fixup->file); - fixup->file = NULL; - fdp = (u32 *)(t->buffer->data + fixup->offset); - /* - * This store can cause problems for CPUs with a - * VIVT cache (eg ARMv5) since the cache cannot - * detect virtual aliases to the same physical cacheline. - * To support VIVT, this address and the user-space VA - * would both need to be flushed. Since this kernel - * VA is not constructed via page_to_virt(), we can't - * use flush_dcache_page() on it, so we'd have to use - * an internal function. If devices with VIVT ever - * need to run Android, we'll either need to go back - * to patching the translated fd from the sender side - * (using the non-standard kernel functions), or rework - * how the kernel uses the buffer to use page_to_virt() - * addresses instead of allocating in our own vm area. - * - * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT. - */ - *fdp = fd; - } - list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { - if (fixup->file) { - fput(fixup->file); - } else if (ret) { - u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); - - binder_deferred_fd_close(*fdp); - } - list_del(&fixup->fixup_entry); - kfree(fixup); - } - - return ret; -} - static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, @@ -4118,13 +3811,11 @@ static int binder_thread_read(struct binder_proc *proc, while (1) { uint32_t cmd; - struct binder_transaction_data_secctx tr; - struct binder_transaction_data *trd = &tr.transaction_data; + struct binder_transaction_data tr; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; - size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) @@ -4146,8 +3837,6 @@ static int binder_thread_read(struct binder_proc *proc, break; } w = binder_dequeue_work_head_ilocked(list); - if (binder_worklist_empty_ilocked(&thread->todo)) - thread->process_todo = false; switch (w->type) { case BINDER_WORK_TRANSACTION: { @@ -4162,17 +3851,14 @@ static int binder_thread_read(struct binder_proc *proc, binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; - cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, cmd); + binder_stat_br(proc, thread, e->cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; - kfree(w); - binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); @@ -4181,6 +3867,8 @@ static int binder_thread_read(struct binder_proc *proc, binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); + kfree(w); + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct 
binder_node, work); @@ -4310,11 +3998,6 @@ static int binder_thread_read(struct binder_proc *proc, if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; - default: - binder_inner_proc_unlock(proc); - pr_err("%d:%d: bad work type %d\n", - proc->pid, thread->pid, w->type); - break; } if (!t) @@ -4324,8 +4007,8 @@ static int binder_thread_read(struct binder_proc *proc, if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; - trd->target.ptr = target_node->ptr; - trd->cookie = target_node->cookie; + tr.target.ptr = target_node->ptr; + tr.cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) @@ -4335,67 +4018,33 @@ static int binder_thread_read(struct binder_proc *proc, binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { - trd->target.ptr = 0; - trd->cookie = 0; + tr.target.ptr = 0; + tr.cookie = 0; cmd = BR_REPLY; } - trd->code = t->code; - trd->flags = t->flags; - trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); + tr.code = t->code; + tr.flags = t->flags; + tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; - trd->sender_pid = - task_tgid_nr_ns(sender, - task_active_pid_ns(current)); + tr.sender_pid = task_tgid_nr_ns(sender, + task_active_pid_ns(current)); } else { - trd->sender_pid = 0; + tr.sender_pid = 0; } - ret = binder_apply_fd_fixups(t); - if (ret) { - struct binder_buffer *buffer = t->buffer; - bool oneway = !!(t->flags & TF_ONE_WAY); - int tid = t->debug_id; - - if (t_from) - binder_thread_dec_tmpref(t_from); - buffer->transaction = NULL; - binder_cleanup_transaction(t, "fd fixups failed", - BR_FAILED_REPLY); - binder_free_buf(proc, buffer); - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", - proc->pid, thread->pid, - oneway ? "async " : - (cmd == BR_REPLY ? 
"reply " : ""), - tid, BR_FAILED_REPLY, ret, __LINE__); - if (cmd == BR_REPLY) { - cmd = BR_FAILED_REPLY; - if (put_user(cmd, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, cmd); - break; - } - continue; - } - trd->data_size = t->buffer->data_size; - trd->offsets_size = t->buffer->offsets_size; - trd->data.ptr.buffer = (binder_uintptr_t) + tr.data_size = t->buffer->data_size; + tr.offsets_size = t->buffer->offsets_size; + tr.data.ptr.buffer = (binder_uintptr_t) ((uintptr_t)t->buffer->data + binder_alloc_get_user_buffer_offset(&proc->alloc)); - trd->data.ptr.offsets = trd->data.ptr.buffer + + tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); - tr.secctx = t->security_ctx; - if (t->security_ctx) { - cmd = BR_TRANSACTION_SEC_CTX; - trsize = sizeof(tr); - } if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4406,7 +4055,7 @@ static int binder_thread_read(struct binder_proc *proc, return -EFAULT; } ptr += sizeof(uint32_t); - if (copy_to_user(ptr, &tr, trsize)) { + if (copy_to_user(ptr, &tr, sizeof(tr))) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4415,7 +4064,7 @@ static int binder_thread_read(struct binder_proc *proc, return -EFAULT; } - ptr += trsize; + ptr += sizeof(tr); trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); @@ -4423,18 +4072,16 @@ static int binder_thread_read(struct binder_proc *proc, "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : - (cmd == BR_TRANSACTION_SEC_CTX) ? - "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", + "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, - (u64)trd->data.ptr.buffer, - (u64)trd->data.ptr.offsets); + (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; - if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { + if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; @@ -4626,8 +4273,6 @@ static int binder_thread_release(struct binder_proc *proc, spin_lock(&t->lock); if (t->to_thread == thread) send_reply = t; - } else { - __acquire(&t->lock); } thread->is_dead = true; @@ -4656,11 +4301,7 @@ static int binder_thread_release(struct binder_proc *proc, spin_unlock(&last_t->lock); if (t) spin_lock(&t->lock); - else - __acquire(&t->lock); } - /* annotation for sparse, lock not acquired in last iteration above */ - __release(&t->lock); /* * If this thread used poll, make sure we remove the waitqueue @@ -4670,20 +4311,11 @@ static int binder_thread_release(struct binder_proc *proc, */ if ((thread->looper & BINDER_LOOPER_STATE_POLL) && waitqueue_active(&thread->wait)) { - wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); + wake_up_poll(&thread->wait, POLLHUP | POLLFREE); } binder_inner_proc_unlock(thread->proc); - /* - * This is needed to avoid races between wake_up_poll() above and - * and ep_remove_waitqueue() called for other reasons (eg the epoll file - * descriptor being closed); ep_remove_waitqueue() holds an RCU read - * lock, so we can be sure it's done after calling synchronize_rcu(). 
- */ - if (thread->looper & BINDER_LOOPER_STATE_POLL) - synchronize_rcu(); - if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(proc, &thread->todo); @@ -4691,7 +4323,7 @@ static int binder_thread_release(struct binder_proc *proc, return active_transactions; } -static __poll_t binder_poll(struct file *filp, +static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait) { struct binder_proc *proc = filp->private_data; @@ -4699,8 +4331,6 @@ static __poll_t binder_poll(struct file *filp, bool wait_for_proc_work; thread = binder_get_thread(proc); - if (!thread) - return POLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -4711,7 +4341,7 @@ static __poll_t binder_poll(struct file *filp, poll_wait(filp, &thread->wait, wait); if (binder_has_work(thread, wait_for_proc_work)) - return EPOLLIN; + return POLLIN; return 0; } @@ -4782,8 +4412,7 @@ static int binder_ioctl_write_read(struct file *filp, return ret; } -static int binder_ioctl_set_ctx_mgr(struct file *filp, - struct flat_binder_object *fbo) +static int binder_ioctl_set_ctx_mgr(struct file *filp) { int ret = 0; struct binder_proc *proc = filp->private_data; @@ -4812,7 +4441,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp, } else { context->binder_context_mgr_uid = curr_euid; } - new_node = binder_new_node(proc, fbo); + new_node = binder_new_node(proc, NULL); if (!new_node) { ret = -ENOMEM; goto out; @@ -4830,42 +4459,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp, return ret; } -static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, - struct binder_node_info_for_ref *info) -{ - struct binder_node *node; - struct binder_context *context = proc->context; - __u32 handle = info->handle; - - if (info->strong_count || info->weak_count || info->reserved1 || - info->reserved2 || info->reserved3) { - binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", - proc->pid); - return -EINVAL; - } - - /* This ioctl may only be used by the context manager */ - mutex_lock(&context->context_mgr_node_lock); - if (!context->binder_context_mgr_node || - context->binder_context_mgr_node->proc != proc) { - mutex_unlock(&context->context_mgr_node_lock); - return -EPERM; - } - mutex_unlock(&context->context_mgr_node_lock); - - node = binder_get_node_from_ref(proc, handle, true, NULL); - if (!node) - return -EINVAL; - - info->strong_count = node->local_strong_refs + - node->internal_strong_refs; - info->weak_count = node->local_weak_refs; - - binder_put_node(node); - - return 0; -} - static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, struct binder_node_debug_info *info) { @@ -4935,20 +4528,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } - case BINDER_SET_CONTEXT_MGR_EXT: { - struct flat_binder_object fbo; - - if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { - ret = -EINVAL; - goto err; - } - ret = binder_ioctl_set_ctx_mgr(filp, &fbo); - if (ret) - goto err; - break; - } case BINDER_SET_CONTEXT_MGR: - ret = binder_ioctl_set_ctx_mgr(filp, NULL); + ret = binder_ioctl_set_ctx_mgr(filp); if (ret) goto err; break; @@ -4972,25 +4553,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; } - case BINDER_GET_NODE_INFO_FOR_REF: { - struct binder_node_info_for_ref info; - - if (copy_from_user(&info, ubuf, sizeof(info))) { - ret = -EFAULT; - goto err; - } - - ret = 
binder_ioctl_get_node_info_for_ref(proc, &info); - if (ret < 0) - goto err; - - if (copy_to_user(ubuf, &info, sizeof(info))) { - ret = -EFAULT; - goto err; - } - - break; - } case BINDER_GET_NODE_DEBUG_INFO: { struct binder_node_debug_info info; @@ -5046,9 +4608,14 @@ static void binder_vma_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); binder_alloc_vma_close(&proc->alloc); + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } -static vm_fault_t binder_vm_fault(struct vm_fault *vmf) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +static int binder_vm_fault(struct vm_fault *vmf) +#else +static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +#endif { return VM_FAULT_SIGBUS; } @@ -5082,19 +4649,20 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) failure_string = "bad vm_flags"; goto err_bad_arg; } - vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; - vma->vm_flags &= ~VM_MAYWRITE; - + vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; + mutex_lock(&proc->files_lock); + proc->files = get_files_struct(current); + mutex_unlock(&proc->files_lock); return 0; err_bad_arg: - pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, + pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -5104,7 +4672,7 @@ static int binder_open(struct inode *nodp, struct file *filp) struct binder_proc *proc; struct binder_device *binder_dev; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, + binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); @@ -5114,6 +4682,7 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; + mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); proc->default_priority = task_nice(current); /* binderfs stashes devices in i_private */ @@ -5146,10 +4715,10 @@ static int binder_open(struct inode *nodp, struct file *filp) * anyway print all contexts that a given PID has, so this * is not a problem. 
*/ - proc->debugfs_entry = debugfs_create_file(strbuf, 0444, + proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, binder_debugfs_dir_entry_proc, (void *)(unsigned long)proc->pid, - &proc_fops); + &binder_proc_fops); } return 0; @@ -5267,6 +4836,8 @@ static void binder_deferred_release(struct binder_proc *proc) struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; + BUG_ON(proc->files); + mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); mutex_unlock(&binder_procs_lock); @@ -5348,6 +4919,7 @@ static void binder_deferred_release(struct binder_proc *proc) static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; + struct files_struct *files; int defer; @@ -5365,11 +4937,23 @@ static void binder_deferred_func(struct work_struct *work) } mutex_unlock(&binder_deferred_lock); + files = NULL; + if (defer & BINDER_DEFERRED_PUT_FILES) { + mutex_lock(&proc->files_lock); + files = proc->files; + if (files) + proc->files = NULL; + mutex_unlock(&proc->files_lock); + } + if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ + + if (files) + put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); @@ -5398,7 +4982,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, @@ -5422,7 +5006,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %pK\n", + seq_printf(m, " size %zd:%zd data %p\n", buffer->data_size, buffer->offsets_size, buffer->data); } @@ -5573,9 +5157,6 @@ static void print_binder_proc(struct seq_file *m, for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); - if (!print_all && !node->has_async_transaction) - continue; - /* * take a temporary reference on the node so it * survives and isn't removed from the tree @@ -5780,7 +5361,7 @@ static void print_binder_proc_stats(struct seq_file *m, } -static int state_show(struct seq_file *m, void *unused) +static int binder_state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; @@ -5819,7 +5400,7 @@ static int state_show(struct seq_file *m, void *unused) return 0; } -static int stats_show(struct seq_file *m, void *unused) +static int binder_stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -5835,7 +5416,7 @@ static int stats_show(struct seq_file *m, void *unused) return 0; } -static int transactions_show(struct seq_file *m, void *unused) +static int binder_transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -5848,7 +5429,7 @@ static int transactions_show(struct seq_file *m, void *unused) return 0; } -static int proc_show(struct seq_file *m, void *unused) +static int binder_proc_show(struct seq_file *m, void *unused) { struct binder_proc *itr; int pid = (unsigned long)m->private; @@ -5891,7 +5472,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m, "\n" : " (incomplete)\n"); } -static int transaction_log_show(struct 
seq_file *m, void *unused) +static int binder_transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; unsigned int log_cur = atomic_read(&log->cur); @@ -5923,10 +5504,12 @@ const struct file_operations binder_fops = { .release = binder_release, }; -DEFINE_SHOW_ATTRIBUTE(state); -DEFINE_SHOW_ATTRIBUTE(stats); -DEFINE_SHOW_ATTRIBUTE(transactions); -DEFINE_SHOW_ATTRIBUTE(transaction_log); +EXPORT_SYMBOL(binder_fops); + +BINDER_DEBUG_ENTRY(state); +BINDER_DEBUG_ENTRY(stats); +BINDER_DEBUG_ENTRY(transactions); +BINDER_DEBUG_ENTRY(transaction_log); static int __init init_binder_device(const char *name) { @@ -5959,14 +5542,11 @@ static int __init init_binder_device(const char *name) static int __init binder_init(void) { int ret; - char *device_name, *device_tmp; + char *device_name, *device_names, *device_tmp; struct binder_device *device; struct hlist_node *tmp; - char *device_names = NULL; - ret = binder_alloc_shrinker_init(); - if (ret) - return ret; + binder_alloc_shrinker_init(); atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); @@ -5978,54 +5558,49 @@ static int __init binder_init(void) if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, NULL, - &state_fops); + &binder_state_fops); debugfs_create_file("stats", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, NULL, - &stats_fops); + &binder_stats_fops); debugfs_create_file("transactions", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, NULL, - &transactions_fops); + &binder_transactions_fops); debugfs_create_file("transaction_log", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log, - &transaction_log_fops); + &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, - &transaction_log_fops); + &binder_transaction_log_fops); } - if (strcmp(binder_devices_param, "") != 0) { - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. - */ - device_names = kstrdup(binder_devices_param, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } - - device_tmp = device_names; - while ((device_name = strsep(&device_tmp, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; - } + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; } + strcpy(device_names, binder_devices_param); - ret = init_binderfs(); - if (ret) - goto err_init_binder_device_failed; + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } return ret; @@ -6044,20 +5619,25 @@ static int __init binder_init(void) return ret; } -module_init(binder_init); -/* - * binder will have no exit function since binderfs instances can be mounted - * multiple times and also in user namespaces finding and destroying them all - * is not feasible without introducing insane locking. Just ignoring existing - * instances on module unload also wouldn't work since we would loose track of - * what major numer was dynamically allocated and also what minor numbers are - * already given out. 
So this would get us into all kinds of issues with device - * number reuse. So simply don't allow unloading unless we are forced to do so. - */ +static void __exit binder_exit(void) +{ + struct binder_device *device; + struct hlist_node *tmp; -MODULE_AUTHOR("Google, Inc."); -MODULE_DESCRIPTION("Driver for Android binder device"); -MODULE_LICENSE("GPL v2"); + hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { + misc_deregister(&device->miscdev); + kfree(device->context.name); + hlist_del(&device->hlist); + kfree(device); + } + + debugfs_remove_recursive(binder_debugfs_dir_entry_root); +} + +module_init(binder_init); +module_exit(binder_exit); #define CREATE_TRACE_POINTS #include "binder_trace.h" + +MODULE_LICENSE("GPL v2"); diff --git a/binder/binder.h b/binder/binder.h deleted file mode 100644 index 35aac58..0000000 --- a/binder/binder.h +++ /dev/null @@ -1,504 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2008 Google, Inc. - * - * Based on, but no longer compatible with, the original - * OpenBinder.org binder driver interface, which is: - * - * Copyright (c) 2005 Palmsource, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef _UAPI_LINUX_BINDER_H -#define _UAPI_LINUX_BINDER_H - -#include -#include - -#define B_PACK_CHARS(c1, c2, c3, c4) \ - ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) -#define B_TYPE_LARGE 0x85 - -#ifndef EPOLLHUP -#define EPOLLHUP POLLHUP -#endif - -#ifndef EPOLLIN -#define EPOLLIN POLLIN -#endif - -enum { - BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), - BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), - BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), - BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), - BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), - BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), - BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), -}; - -enum { - FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, - FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, - - /** - * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts - * - * Only when set, causes senders to include their security - * context - */ - FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000, -}; - -#ifdef BINDER_IPC_32BIT -typedef __u32 binder_size_t; -typedef __u32 binder_uintptr_t; -#else -typedef __u64 binder_size_t; -typedef __u64 binder_uintptr_t; -#endif - -/** - * struct binder_object_header - header shared by all binder metadata objects. - * @type: type of the object - */ -struct binder_object_header { - __u32 type; -}; - -/* - * This is the flattened representation of a Binder object for transfer - * between processes. The 'offsets' supplied as part of a binder transaction - * contains offsets into the data where these structures occur. The Binder - * driver takes care of re-writing the structure type and data as it moves - * between processes. - */ -struct flat_binder_object { - struct binder_object_header hdr; - __u32 flags; - - /* 8 bytes of data. 
*/ - union { - binder_uintptr_t binder; /* local object */ - __u32 handle; /* remote object */ - }; - - /* extra data associated with local object */ - binder_uintptr_t cookie; -}; - -/** - * struct binder_fd_object - describes a filedescriptor to be fixed up. - * @hdr: common header structure - * @pad_flags: padding to remain compatible with old userspace code - * @pad_binder: padding to remain compatible with old userspace code - * @fd: file descriptor - * @cookie: opaque data, used by user-space - */ -struct binder_fd_object { - struct binder_object_header hdr; - __u32 pad_flags; - union { - binder_uintptr_t pad_binder; - __u32 fd; - }; - - binder_uintptr_t cookie; -}; - -/* struct binder_buffer_object - object describing a userspace buffer - * @hdr: common header structure - * @flags: one or more BINDER_BUFFER_* flags - * @buffer: address of the buffer - * @length: length of the buffer - * @parent: index in offset array pointing to parent buffer - * @parent_offset: offset in @parent pointing to this buffer - * - * A binder_buffer object represents an object that the - * binder kernel driver can copy verbatim to the target - * address space. A buffer itself may be pointed to from - * within another buffer, meaning that the pointer inside - * that other buffer needs to be fixed up as well. This - * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT - * flag in @flags, by setting @parent buffer to the index - * in the offset array pointing to the parent binder_buffer_object, - * and by setting @parent_offset to the offset in the parent buffer - * at which the pointer to this buffer is located. - */ -struct binder_buffer_object { - struct binder_object_header hdr; - __u32 flags; - binder_uintptr_t buffer; - binder_size_t length; - binder_size_t parent; - binder_size_t parent_offset; -}; - -enum { - BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, -}; - -/* struct binder_fd_array_object - object describing an array of fds in a buffer - * @hdr: common header structure - * @pad: padding to ensure correct alignment - * @num_fds: number of file descriptors in the buffer - * @parent: index in offset array to buffer holding the fd array - * @parent_offset: start offset of fd array in the buffer - * - * A binder_fd_array object represents an array of file - * descriptors embedded in a binder_buffer_object. It is - * different from a regular binder_buffer_object because it - * describes a list of file descriptors to fix up, not an opaque - * blob of memory, and hence the kernel needs to treat it differently. - * - * An example of how this would be used is with Android's - * native_handle_t object, which is a struct with a list of integers - * and a list of file descriptors. The native_handle_t struct itself - * will be represented by a struct binder_buffer_objct, whereas the - * embedded list of file descriptors is represented by a - * struct binder_fd_array_object with that binder_buffer_object as - * a parent. - */ -struct binder_fd_array_object { - struct binder_object_header hdr; - __u32 pad; - binder_size_t num_fds; - binder_size_t parent; - binder_size_t parent_offset; -}; - -/* - * On 64-bit platforms where user code may run in 32-bits the driver must - * translate the buffer (and local binder) addresses appropriately. 
- */ - -struct binder_write_read { - binder_size_t write_size; /* bytes to write */ - binder_size_t write_consumed; /* bytes consumed by driver */ - binder_uintptr_t write_buffer; - binder_size_t read_size; /* bytes to read */ - binder_size_t read_consumed; /* bytes consumed by driver */ - binder_uintptr_t read_buffer; -}; - -/* Use with BINDER_VERSION, driver fills in fields. */ -struct binder_version { - /* driver protocol version -- increment with incompatible change */ - __s32 protocol_version; -}; - -/* This is the current protocol version. */ -#ifdef BINDER_IPC_32BIT -#define BINDER_CURRENT_PROTOCOL_VERSION 7 -#else -#define BINDER_CURRENT_PROTOCOL_VERSION 8 -#endif - -/* - * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields. - * Set ptr to NULL for the first call to get the info for the first node, and - * then repeat the call passing the previously returned value to get the next - * nodes. ptr will be 0 when there are no more nodes. - */ -struct binder_node_debug_info { - binder_uintptr_t ptr; - binder_uintptr_t cookie; - __u32 has_strong_ref; - __u32 has_weak_ref; -}; - -struct binder_node_info_for_ref { - __u32 handle; - __u32 strong_count; - __u32 weak_count; - __u32 reserved1; - __u32 reserved2; - __u32 reserved3; -}; - -#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) -#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) -#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) -#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) -#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) -#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) -#define BINDER_VERSION _IOWR('b', 9, struct binder_version) -#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) -#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) -#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) - -/* - * NOTE: Two special error codes you should check for when calling - * in to the driver are: - * - * EINTR -- The operation has been interupted. This should be - * handled by retrying the ioctl() until a different error code - * is returned. - * - * ECONNREFUSED -- The driver is no longer accepting operations - * from your process. That is, the process is being destroyed. - * You should handle this by exiting from your process. Note - * that once this error code is returned, all further calls to - * the driver from any thread will return this same code. - */ - -enum transaction_flags { - TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ - TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ - TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ - TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ -}; - -struct binder_transaction_data { - /* The first two are only used for bcTRANSACTION and brTRANSACTION, - * identifying the target and contents of the transaction. - */ - union { - /* target descriptor of command transaction */ - __u32 handle; - /* target descriptor of return transaction */ - binder_uintptr_t ptr; - } target; - binder_uintptr_t cookie; /* target object cookie */ - __u32 code; /* transaction command */ - - /* General information about the transaction. 
*/ - __u32 flags; - pid_t sender_pid; - uid_t sender_euid; - binder_size_t data_size; /* number of bytes of data */ - binder_size_t offsets_size; /* number of bytes of offsets */ - - /* If this transaction is inline, the data immediately - * follows here; otherwise, it ends with a pointer to - * the data buffer. - */ - union { - struct { - /* transaction data */ - binder_uintptr_t buffer; - /* offsets from buffer to flat_binder_object structs */ - binder_uintptr_t offsets; - } ptr; - __u8 buf[8]; - } data; -}; - -struct binder_transaction_data_secctx { - struct binder_transaction_data transaction_data; - binder_uintptr_t secctx; -}; - -struct binder_transaction_data_sg { - struct binder_transaction_data transaction_data; - binder_size_t buffers_size; -}; - -struct binder_ptr_cookie { - binder_uintptr_t ptr; - binder_uintptr_t cookie; -}; - -struct binder_handle_cookie { - __u32 handle; - binder_uintptr_t cookie; -} __packed; - -struct binder_pri_desc { - __s32 priority; - __u32 desc; -}; - -struct binder_pri_ptr_cookie { - __s32 priority; - binder_uintptr_t ptr; - binder_uintptr_t cookie; -}; - -enum binder_driver_return_protocol { - BR_ERROR = _IOR('r', 0, __s32), - /* - * int: error code - */ - - BR_OK = _IO('r', 1), - /* No parameters! */ - - BR_TRANSACTION_SEC_CTX = _IOR('r', 2, - struct binder_transaction_data_secctx), - /* - * binder_transaction_data_secctx: the received command. - */ - BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), - BR_REPLY = _IOR('r', 3, struct binder_transaction_data), - /* - * binder_transaction_data: the received command. - */ - - BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), - /* - * not currently supported - * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. - * Else the remote object has acquired a primary reference. - */ - - BR_DEAD_REPLY = _IO('r', 5), - /* - * The target of the last transaction (either a bcTRANSACTION or - * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. - */ - - BR_TRANSACTION_COMPLETE = _IO('r', 6), - /* - * No parameters... always refers to the last transaction requested - * (including replies). Note that this will be sent even for - * asynchronous transactions. - */ - - BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), - BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), - BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), - BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie for binder - */ - - BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), - /* - * not currently supported - * int: priority - * void *: ptr to binder - * void *: cookie for binder - */ - - BR_NOOP = _IO('r', 12), - /* - * No parameters. Do nothing and examine the next command. It exists - * primarily so that we can replace it with a BR_SPAWN_LOOPER command. - */ - - BR_SPAWN_LOOPER = _IO('r', 13), - /* - * No parameters. The driver has determined that a process has no - * threads waiting to service incoming transactions. When a process - * receives this command, it must spawn a new service thread and - * register it via bcENTER_LOOPER. - */ - - BR_FINISHED = _IO('r', 14), - /* - * not currently supported - * stop threadpool thread - */ - - BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), - /* - * void *: cookie - */ - BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), - /* - * void *: cookie - */ - - BR_FAILED_REPLY = _IO('r', 17), - /* - * The the last transaction (either a bcTRANSACTION or - * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). 
No parameters. - */ -}; - -enum binder_driver_command_protocol { - BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), - BC_REPLY = _IOW('c', 1, struct binder_transaction_data), - /* - * binder_transaction_data: the sent command. - */ - - BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), - /* - * not currently supported - * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. - * Else you have acquired a primary reference on the object. - */ - - BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), - /* - * void *: ptr to transaction data received on a read - */ - - BC_INCREFS = _IOW('c', 4, __u32), - BC_ACQUIRE = _IOW('c', 5, __u32), - BC_RELEASE = _IOW('c', 6, __u32), - BC_DECREFS = _IOW('c', 7, __u32), - /* - * int: descriptor - */ - - BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), - BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie for binder - */ - - BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), - /* - * not currently supported - * int: priority - * int: descriptor - */ - - BC_REGISTER_LOOPER = _IO('c', 11), - /* - * No parameters. - * Register a spawned looper thread with the device. - */ - - BC_ENTER_LOOPER = _IO('c', 12), - BC_EXIT_LOOPER = _IO('c', 13), - /* - * No parameters. - * These two commands are sent as an application-level thread - * enters and exits the binder loop, respectively. They are - * used so the binder can have an accurate count of the number - * of looping threads it has available. - */ - - BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, - struct binder_handle_cookie), - /* - * int: handle - * void *: cookie - */ - - BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, - struct binder_handle_cookie), - /* - * int: handle - * void *: cookie - */ - - BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), - /* - * void *: cookie - */ - - BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), - BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), - /* - * binder_transaction_data_sg: the sent command. - */ -}; - -#endif /* _UAPI_LINUX_BINDER_H */ - diff --git a/binder/binder_alloc.c b/binder/binder_alloc.c index c4d8b78..5d27553 100644 --- a/binder/binder_alloc.c +++ b/binder/binder_alloc.c @@ -17,8 +17,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include -#include +#include #include #include #include @@ -27,29 +28,32 @@ #include #include #include -#include -#include +#include #include "binder_alloc.h" #include "binder_trace.h" +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif + struct list_lru binder_alloc_lru; static DEFINE_MUTEX(binder_alloc_mmap_lock); enum { - BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_OPEN_CLOSE = 1U << 1, BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, }; -static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR; +static uint32_t binder_alloc_debug_mask; -module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644); +module_param_named(debug_mask, binder_alloc_debug_mask, + uint, 0644); #define binder_alloc_debug(mask, x...) \ do { \ if (binder_alloc_debug_mask & mask) \ - pr_info_ratelimited(x); \ + pr_info(x); \ } while (0) static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) @@ -150,12 +154,14 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( else { /* * Guard against user threads attempting to - * free the buffer when in use by kernel or - * after it's already been freed. 
+ * free the buffer twice */ - if (!buffer->allow_user_free) - return ERR_PTR(-EPERM); - buffer->allow_user_free = 0; + if (buffer->free_in_progress) { + pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", + alloc->pid, current->pid, (u64)user_ptr); + return NULL; + } + buffer->free_in_progress = 1; return buffer; } } @@ -185,12 +191,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end) + void *start, void *end, + struct vm_area_struct *vma) { void *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; - struct vm_area_struct *vma = NULL; struct mm_struct *mm = NULL; bool need_mm = false; @@ -214,18 +220,17 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } } - if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) + if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) mm = alloc->vma_vm_mm; if (mm) { - down_read(&mm->mmap_sem); + down_write(&mm->mmap_sem); vma = alloc->vma; } if (!vma && need_mm) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - alloc->pid); + pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); goto err_no_vma; } @@ -281,14 +286,11 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, goto err_vm_insert_page_failed; } - if (index + 1 > alloc->pages_high) - alloc->pages_high = index + 1; - trace_binder_alloc_page_end(alloc, index); /* vm_insert_page does not seem to increment the refcount */ } if (mm) { - up_read(&mm->mmap_sem); + up_write(&mm->mmap_sem); mmput(mm); } return 0; @@ -321,47 +323,17 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } err_no_vma: if (mm) { - up_read(&mm->mmap_sem); + up_write(&mm->mmap_sem); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } - -static inline void binder_alloc_set_vma(struct binder_alloc *alloc, - struct vm_area_struct *vma) -{ - if (vma) - alloc->vma_vm_mm = vma->vm_mm; - /* - * If we see alloc->vma is not NULL, buffer data structures set up - * completely. Look at smp_rmb side binder_alloc_get_vma. - * We also want to guarantee new alloc->vma_vm_mm is always visible - * if alloc->vma is set. 
- */ - smp_wmb(); - alloc->vma = vma; -} - -static inline struct vm_area_struct *binder_alloc_get_vma( - struct binder_alloc *alloc) -{ - struct vm_area_struct *vma = NULL; - - if (alloc->vma) { - /* Look at description in binder_alloc_set_vma */ - smp_rmb(); - vma = alloc->vma; - } - return vma; -} - -static struct binder_buffer *binder_alloc_new_buf_locked( - struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) +struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -372,10 +344,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked( size_t size, data_offsets_size; int ret; - if (!binder_alloc_get_vma(alloc)) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf, no vma\n", - alloc->pid); + if (alloc->vma == NULL) { + pr_err("%d: binder_alloc_buf, no vma\n", + alloc->pid); return ERR_PTR(-ESRCH); } @@ -447,14 +418,11 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (buffer_size > largest_free_size) largest_free_size = buffer_size; } - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf size %zd failed, no address space\n", - alloc->pid, size); - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", - total_alloc_size, allocated_buffers, - largest_alloc_size, total_free_size, - free_buffers, largest_free_size); + pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", + alloc->pid, size); + pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, largest_alloc_size, + total_free_size, free_buffers, largest_free_size); return ERR_PTR(-ENOSPC); } if (n == NULL) { @@ -474,7 +442,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); if (ret) return ERR_PTR(ret); @@ -495,7 +463,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; - buffer->allow_user_free = 0; + buffer->free_in_progress = 0; binder_insert_allocated_buffer_locked(alloc, buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %pK\n", @@ -515,7 +483,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( err_alloc_buf_struct_failed: binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - end_page_addr); + end_page_addr, NULL); return ERR_PTR(-ENOMEM); } @@ -599,7 +567,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, alloc->pid, buffer->data, prev->data, next ? 
next->data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE); + buffer_start_page(buffer) + PAGE_SIZE, + NULL); } list_del(&buffer->entry); kfree(buffer); @@ -636,7 +605,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), + NULL); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -725,8 +695,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, } } #endif - alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, - sizeof(alloc->pages[0]), + alloc->pages = kzalloc(sizeof(alloc->pages[0]) * + ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); if (alloc->pages == NULL) { ret = -ENOMEM; @@ -747,7 +717,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; - binder_alloc_set_vma(alloc, vma); + barrier(); + alloc->vma = vma; + alloc->vma_vm_mm = vma->vm_mm; mmgrab(alloc->vma_vm_mm); return 0; @@ -762,10 +734,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%s: %d %lx-%lx %s failed %d\n", __func__, - alloc->pid, vma->vm_start, vma->vm_end, - failure_string, ret); + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -776,10 +746,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int buffers, page_count; struct binder_buffer *buffer; - buffers = 0; - mutex_lock(&alloc->mutex); BUG_ON(alloc->vma); + buffers = 0; + mutex_lock(&alloc->mutex); while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -890,7 +860,6 @@ void binder_alloc_print_pages(struct seq_file *m, } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); - seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } /** @@ -922,7 +891,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) */ void binder_alloc_vma_close(struct binder_alloc *alloc) { - binder_alloc_set_vma(alloc, NULL); + WRITE_ONCE(alloc->vma, NULL); } /** @@ -938,7 +907,6 @@ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg) - __must_hold(lock) { struct mm_struct *mm = NULL; struct binder_lru_page *page = container_of(item, @@ -958,13 +926,14 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; - - mm = alloc->vma_vm_mm; - if (!mmget_not_zero(mm)) - goto err_mmget; - if (!down_write_trylock(&mm->mmap_sem)) - goto err_down_write_mmap_sem_failed; - vma = binder_alloc_get_vma(alloc); + vma = alloc->vma; + if (vma) { + if (!mmget_not_zero(alloc->vma_vm_mm)) + goto err_mmget; + mm = alloc->vma_vm_mm; + if (!down_write_trylock(&mm->mmap_sem)) + goto err_down_write_mmap_sem_failed; + } list_lru_isolate(lru, item); spin_unlock(lock); @@ -977,9 +946,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); + + up_write(&mm->mmap_sem); + mmput(mm); } - 
up_write(&mm->mmap_sem); - mmput(mm); trace_binder_unmap_kernel_start(alloc, index); @@ -1019,7 +989,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) return ret; } -static struct shrinker binder_shrinker = { +struct shrinker binder_shrinker = { .count_objects = binder_shrink_count, .scan_objects = binder_shrink_scan, .seeks = DEFAULT_SEEKS, @@ -1039,14 +1009,8 @@ void binder_alloc_init(struct binder_alloc *alloc) INIT_LIST_HEAD(&alloc->buffers); } -int binder_alloc_shrinker_init(void) +void binder_alloc_shrinker_init(void) { - int ret = list_lru_init(&binder_alloc_lru); - - if (ret == 0) { - ret = register_shrinker(&binder_shrinker); - if (ret) - list_lru_destroy(&binder_alloc_lru); - } - return ret; + list_lru_init(&binder_alloc_lru); + register_shrinker(&binder_shrinker); } diff --git a/binder/binder_alloc.h b/binder/binder_alloc.h index 366c833..2dd33b6 100644 --- a/binder/binder_alloc.h +++ b/binder/binder_alloc.h @@ -15,7 +15,6 @@ #ifndef _LINUX_BINDER_ALLOC_H #define _LINUX_BINDER_ALLOC_H -#include #include #include #include @@ -31,16 +30,16 @@ struct binder_transaction; * struct binder_buffer - buffer used for binder transactions * @entry: entry alloc->buffers * @rb_node: node for allocated_buffers/free_buffers rb trees - * @free: %true if buffer is free - * @allow_user_free: %true if user is allowed to free buffer - * @async_transaction: %true if buffer is in use for an async txn - * @debug_id: unique ID for debugging - * @transaction: pointer to associated struct binder_transaction - * @target_node: struct binder_node associated with this buffer - * @data_size: size of @transaction data - * @offsets_size: size of array of offsets - * @extra_buffers_size: size of space for other objects (like sg lists) - * @data: pointer to base of buffer space + * @free: true if buffer is free + * @allow_user_free: describe the second member of struct blah, + * @async_transaction: describe the second member of struct blah, + * @debug_id: describe the second member of struct blah, + * @transaction: describe the second member of struct blah, + * @target_node: describe the second member of struct blah, + * @data_size: describe the second member of struct blah, + * @offsets_size: describe the second member of struct blah, + * @extra_buffers_size: describe the second member of struct blah, + * @data:i describe the second member of struct blah, * * Bookkeeping structure for binder transaction buffers */ @@ -51,7 +50,8 @@ struct binder_buffer { unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned debug_id:29; + unsigned free_in_progress:1; + unsigned debug_id:28; struct binder_transaction *transaction; @@ -92,7 +92,6 @@ struct binder_lru_page { * @pages: array of binder_lru_page * @buffer_size: size of address space specified via mmap * @pid: pid for associated binder_proc (invariant after init) - * @pages_high: high watermark of offset in @pages * * Bookkeeping structure for per-proc address space management for binder * buffers. 
It is normally initialized during binder_init() and binder_mmap() @@ -113,10 +112,9 @@ struct binder_alloc { size_t buffer_size; uint32_t buffer_free; int pid; - size_t pages_high; }; -#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST) +#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST void binder_selftest_alloc(struct binder_alloc *alloc); #else static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} @@ -130,7 +128,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t extra_buffers_size, int is_async); extern void binder_alloc_init(struct binder_alloc *alloc); -extern int binder_alloc_shrinker_init(void); +void binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/binder/binderfs.h b/binder/binder_ctl.h similarity index 77% rename from binder/binderfs.h rename to binder/binder_ctl.h index 29edbad..65b2efd 100644 --- a/binder/binderfs.h +++ b/binder/binder_ctl.h @@ -4,15 +4,15 @@ * */ -#ifndef _UAPI_LINUX_BINDERFS_H -#define _UAPI_LINUX_BINDERFS_H +#ifndef _UAPI_LINUX_BINDER_CTL_H +#define _UAPI_LINUX_BINDER_CTL_H +#include #include #include -#include "binder.h" #define BINDERFS_MAX_NAME 255 -#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 + /** * struct binderfs_device - retrieve information about a new binder device * @name: the name to use for the new binderfs binder device @@ -22,8 +22,8 @@ */ struct binderfs_device { char name[BINDERFS_MAX_NAME + 1]; - __u32 major; - __u32 minor; + __u8 major; + __u8 minor; }; /** @@ -31,5 +31,5 @@ struct binderfs_device { */ #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) -#endif /* _UAPI_LINUX_BINDERFS_H */ +#endif /* _UAPI_LINUX_BINDER_CTL_H */ diff --git a/binder/binder_internal.h b/binder/binder_internal.h index 759bcec..161c993 100644 --- a/binder/binder_internal.h +++ b/binder/binder_internal.h @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -38,22 +37,13 @@ struct binder_device { extern const struct file_operations binder_fops; -#if IS_ENABLED(CONFIG_ANDROID_BINDERFS) +//#ifdef CONFIG_ANDROID_BINDERFS extern bool is_binderfs_device(const struct inode *inode); -#else -static inline bool is_binderfs_device(const struct inode *inode) -{ - return false; -} -#endif - -#if IS_ENABLED(CONFIG_ANDROID_BINDERFS) -extern int __init init_binderfs(void); -#else -static inline int __init init_binderfs(void) -{ - return 0; -} -#endif +//#else +//static inline bool is_binderfs_device(const struct inode *inode) +//{ +// return false; +//} +//#endif #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/binder/binder_trace.h b/binder/binder_trace.h index 14de7ac..76e3b9c 100644 --- a/binder/binder_trace.h +++ b/binder/binder_trace.h @@ -223,40 +223,22 @@ TRACE_EVENT(binder_transaction_ref_to_ref, __entry->dest_ref_debug_id, __entry->dest_ref_desc) ); -TRACE_EVENT(binder_transaction_fd_send, - TP_PROTO(struct binder_transaction *t, int fd, size_t offset), - TP_ARGS(t, fd, offset), +TRACE_EVENT(binder_transaction_fd, + TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), + TP_ARGS(t, src_fd, dest_fd), TP_STRUCT__entry( __field(int, debug_id) - __field(int, fd) - __field(size_t, offset) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->fd = fd; - __entry->offset = offset; - ), - TP_printk("transaction=%d src_fd=%d offset=%zu", - __entry->debug_id, __entry->fd, __entry->offset) -); - -TRACE_EVENT(binder_transaction_fd_recv, 
- TP_PROTO(struct binder_transaction *t, int fd, size_t offset), - TP_ARGS(t, fd, offset), - - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, fd) - __field(size_t, offset) + __field(int, src_fd) + __field(int, dest_fd) ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->fd = fd; - __entry->offset = offset; + __entry->src_fd = src_fd; + __entry->dest_fd = dest_fd; ), - TP_printk("transaction=%d dest_fd=%d offset=%zu", - __entry->debug_id, __entry->fd, __entry->offset) + TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", + __entry->debug_id, __entry->src_fd, __entry->dest_fd) ); DECLARE_EVENT_CLASS(binder_buffer_class, @@ -266,17 +248,14 @@ DECLARE_EVENT_CLASS(binder_buffer_class, __field(int, debug_id) __field(size_t, data_size) __field(size_t, offsets_size) - __field(size_t, extra_buffers_size) ), TP_fast_assign( __entry->debug_id = buf->debug_id; __entry->data_size = buf->data_size; __entry->offsets_size = buf->offsets_size; - __entry->extra_buffers_size = buf->extra_buffers_size; ), - TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd", - __entry->debug_id, __entry->data_size, __entry->offsets_size, - __entry->extra_buffers_size) + TP_printk("transaction=%d data_size=%zd offsets_size=%zd", + __entry->debug_id, __entry->data_size, __entry->offsets_size) ); DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, diff --git a/binder/binderfs.c b/binder/binderfs.c index dc3e4f5..5199a35 100644 --- a/binder/binderfs.c +++ b/binder/binderfs.c @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include #include #include #include @@ -11,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -21,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -29,43 +26,27 @@ #include #include #include +#include #include +#include +#include "binder_ctl.h" #include "binder_internal.h" -#include "binder.h" -#include "binderfs.h" -#include "deps.h" #define FIRST_INODE 1 #define SECOND_INODE 2 #define INODE_OFFSET 3 #define INTSTRLEN 21 #define BINDERFS_MAX_MINOR (1U << MINORBITS) -/* Ensure that the initial ipc namespace always has devices available. */ -#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) + +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 + +static struct vfsmount *binderfs_mnt; static dev_t binderfs_dev; static DEFINE_MUTEX(binderfs_minors_mutex); static DEFINE_IDA(binderfs_minors); -/** - * binderfs_mount_opts - mount options for binderfs - * @max: maximum number of allocatable binderfs binder devices - */ -struct binderfs_mount_opts { - int max; -}; - -enum { - Opt_max, - Opt_err -}; - -static const match_table_t tokens = { - { Opt_max, "max=%d" }, - { Opt_err, NULL } -}; - /** * binderfs_info - information about a binderfs mount * @ipc_ns: The ipc namespace the binderfs mount belongs to. @@ -75,16 +56,13 @@ static const match_table_t tokens = { * created. * @root_gid: gid that needs to be used when a new binder device is * created. - * @mount_opts: The mount options in use. - * @device_count: The current number of allocated binder devices. 
*/ struct binderfs_info { struct ipc_namespace *ipc_ns; struct dentry *control_dentry; kuid_t root_uid; kgid_t root_gid; - struct binderfs_mount_opts mount_opts; - int device_count; + }; static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) @@ -92,6 +70,7 @@ static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) return inode->i_sb->s_fs_info; } +/* bool is_binderfs_device(const struct inode *inode) { if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) @@ -99,7 +78,7 @@ bool is_binderfs_device(const struct inode *inode) return false; } - +*/ /** * binderfs_binder_device_create - allocate inode from super block of a * binderfs mount @@ -107,7 +86,7 @@ bool is_binderfs_device(const struct inode *inode) * @userp: buffer to copy information about new device for userspace to * @req: struct binderfs_device as copied from userspace * - * This function allocates a new binder_device and reserves a new minor + * This function allocated a new binder_device and reserves a new minor * number for it. * Minor numbers are limited and tracked globally in binderfs_minors. The * function will stash a struct binder_device for the specific binder @@ -123,34 +102,21 @@ static int binderfs_binder_device_create(struct inode *ref_inode, struct binderfs_device *req) { int minor, ret; - struct dentry *dentry, *root; + struct dentry *dentry, *dup, *root; struct binder_device *device; + size_t name_len = BINDERFS_MAX_NAME + 1; char *name = NULL; - size_t name_len; struct inode *inode = NULL; struct super_block *sb = ref_inode->i_sb; struct binderfs_info *info = sb->s_fs_info; -#if defined(CONFIG_IPC_NS) - bool use_reserve = (info->ipc_ns == show_init_ipc_ns_compat()); -#else - bool use_reserve = true; -#endif /* Reserve new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - if (++info->device_count <= info->mount_opts.max) - minor = ida_alloc_max_compat(&binderfs_minors, - use_reserve ? 
BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, - GFP_KERNEL); - else - minor = -ENOSPC; - if (minor < 0) { - --info->device_count; - mutex_unlock(&binderfs_minors_mutex); - return minor; - } + //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); + if (minor < 0) + return minor; ret = -ENOMEM; device = kzalloc(sizeof(*device), GFP_KERNEL); @@ -169,13 +135,12 @@ static int binderfs_binder_device_create(struct inode *ref_inode, inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; - req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ - name_len = strlen(req->name); - /* Make sure to include terminating NUL byte */ - name = kmemdup(req->name, name_len + 1, GFP_KERNEL); + name = kmalloc(name_len, GFP_KERNEL); if (!name) goto err; + strscpy(name, req->name, name_len); + device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -194,25 +159,28 @@ static int binderfs_binder_device_create(struct inode *ref_inode, root = sb->s_root; inode_lock(d_inode(root)); - - /* look it up */ - dentry = lookup_one_len(name, root, name_len); - if (IS_ERR(dentry)) { + dentry = d_alloc_name(root, name); + if (!dentry) { inode_unlock(d_inode(root)); - ret = PTR_ERR(dentry); + ret = -ENOMEM; goto err; } - if (d_really_is_positive(dentry)) { - /* already exists */ - dput(dentry); - inode_unlock(d_inode(root)); - ret = -EEXIST; - goto err; + /* Verify that the name userspace gave us is not already in use. */ + dup = d_lookup(root, &dentry->d_name); + if (dup) { + if (d_really_is_positive(dup)) { + dput(dup); + dput(dentry); + inode_unlock(d_inode(root)); + ret = -EEXIST; + goto err; + } + dput(dup); } inode->i_private = device; - d_instantiate(dentry, inode); + d_add(dentry, inode); fsnotify_create(root->d_inode, dentry); inode_unlock(d_inode(root)); @@ -222,8 +190,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode, kfree(name); kfree(device); mutex_lock(&binderfs_minors_mutex); - --info->device_count; - ida_free_compat(&binderfs_minors, minor); + //ida_free(&binderfs_minors, minor); + ida_simple_remove(&binderfs_minors, minor); mutex_unlock(&binderfs_minors_mutex); iput(inode); @@ -268,7 +236,6 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, static void binderfs_evict_inode(struct inode *inode) { struct binder_device *device = inode->i_private; - struct binderfs_info *info = BINDERFS_I(inode); clear_inode(inode); @@ -276,95 +243,51 @@ static void binderfs_evict_inode(struct inode *inode) return; mutex_lock(&binderfs_minors_mutex); - --info->device_count; - ida_free_compat(&binderfs_minors, device->miscdev.minor); + //ida_free(&binderfs_minors, device->miscdev.minor); + ida_simple_remove(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); kfree(device->context.name); kfree(device); } -/** - * binderfs_parse_mount_opts - parse binderfs mount options - * @data: options to set (can be NULL in which case defaults are used) - */ -static int binderfs_parse_mount_opts(char *data, - struct binderfs_mount_opts *opts) -{ - char *p; - opts->max = BINDERFS_MAX_MINOR; - - while ((p = strsep(&data, ",")) != NULL) { - substring_t args[MAX_OPT_ARGS]; - int token; - int max_devices; - - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_max: - if (match_int(&args[0], &max_devices) || - (max_devices < 0 || - 
(max_devices > BINDERFS_MAX_MINOR))) - return -EINVAL; - - opts->max = max_devices; - break; - default: - pr_err("Invalid mount options\n"); - return -EINVAL; - } - } - - return 0; -} - -static int binderfs_remount(struct super_block *sb, int *flags, char *data) -{ - struct binderfs_info *info = sb->s_fs_info; - return binderfs_parse_mount_opts(data, &info->mount_opts); -} - -static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root) -{ - struct binderfs_info *info; - - info = root->d_sb->s_fs_info; - if (info->mount_opts.max <= BINDERFS_MAX_MINOR) - seq_printf(seq, ",max=%d", info->mount_opts.max); - - return 0; -} - static const struct super_operations binderfs_super_ops = { - .evict_inode = binderfs_evict_inode, - .remount_fs = binderfs_remount, - .show_options = binderfs_show_mount_opts, - .statfs = simple_statfs, + .statfs = simple_statfs, + .evict_inode = binderfs_evict_inode, }; -static inline bool is_binderfs_control_device(const struct dentry *dentry) -{ - struct binderfs_info *info = dentry->d_sb->s_fs_info; - return info->control_dentry == dentry; -} - static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - if (is_binderfs_control_device(old_dentry) || - is_binderfs_control_device(new_dentry)) + struct inode *inode = d_inode(old_dentry); + + /* binderfs doesn't support directories. */ + if (d_is_dir(old_dentry)) return -EPERM; - return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags); + if (flags & ~RENAME_NOREPLACE) + return -EINVAL; + + if (!simple_empty(new_dentry)) + return -ENOTEMPTY; + + if (d_really_is_positive(new_dentry)) + simple_unlink(new_dir, new_dentry); + + old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = + new_dir->i_mtime = inode->i_ctime = current_time(old_dir); + + return 0; } static int binderfs_unlink(struct inode *dir, struct dentry *dentry) { - if (is_binderfs_control_device(dentry)) + /* + * The control dentry is only ever touched during mount so checking it + * here should not require us to take lock. + */ + if (BINDERFS_I(dir)->control_dentry == dentry) return -EPERM; return simple_unlink(dir, dentry); @@ -395,16 +318,13 @@ static int binderfs_binder_ctl_create(struct super_block *sb) struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; -#if defined(CONFIG_IPC_NS) - bool use_reserve = (info->ipc_ns == show_init_ipc_ns_compat()); -#else - bool use_reserve = true; -#endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return -ENOMEM; + inode_lock(d_inode(root)); + /* If we have already created a binder-control node, return. */ if (info->control_dentry) { ret = 0; @@ -418,10 +338,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - minor = ida_alloc_max_compat(&binderfs_minors, - use_reserve ? 
BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, - GFP_KERNEL); + //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; @@ -446,10 +364,12 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_private = device; info->control_dentry = dentry; d_add(dentry, inode); + inode_unlock(d_inode(root)); return 0; out: + inode_unlock(d_inode(root)); kfree(device); iput(inode); @@ -464,9 +384,12 @@ static const struct inode_operations binderfs_dir_inode_operations = { static int binderfs_fill_super(struct super_block *sb, void *data, int silent) { - int ret; struct binderfs_info *info; + int ret = -ENOMEM; struct inode *inode = NULL; + struct ipc_namespace *ipc_ns = sb->s_fs_info; + + get_ipc_ns(ipc_ns); sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; @@ -488,17 +411,11 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_op = &binderfs_super_ops; sb->s_time_gran = 1; - sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); - if (!sb->s_fs_info) - return -ENOMEM; - info = sb->s_fs_info; - - info->ipc_ns = get_ipc_ns_exported_compat(current->nsproxy->ipc_ns); - - ret = binderfs_parse_mount_opts(data, &info->mount_opts); - if (ret) - return ret; + info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); + if (!info) + goto err_without_dentry; + info->ipc_ns = ipc_ns; info->root_gid = make_kgid(sb->s_user_ns, 0); if (!gid_valid(info->root_gid)) info->root_gid = GLOBAL_ROOT_GID; @@ -506,9 +423,11 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) if (!uid_valid(info->root_uid)) info->root_uid = GLOBAL_ROOT_UID; + sb->s_fs_info = info; + inode = new_inode(sb); if (!inode) - return -ENOMEM; + goto err_without_dentry; inode->i_ino = FIRST_INODE; inode->i_fop = &simple_dir_operations; @@ -519,28 +438,94 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_root = d_make_root(inode); if (!sb->s_root) - return -ENOMEM; + goto err_without_dentry; + + ret = binderfs_binder_ctl_create(sb); + if (ret) + goto err_with_dentry; + + return 0; + +err_with_dentry: + dput(sb->s_root); + sb->s_root = NULL; - return binderfs_binder_ctl_create(sb); +err_without_dentry: + put_ipc_ns(ipc_ns); + iput(inode); + kfree(info); + + return ret; +} + +static int binderfs_test_super(struct super_block *sb, void *data) +{ + struct binderfs_info *info = sb->s_fs_info; + + if (info) + return info->ipc_ns == data; + + return 0; +} + +static int binderfs_set_super(struct super_block *sb, void *data) +{ + sb->s_fs_info = data; + return set_anon_super(sb, NULL); } static struct dentry *binderfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_nodev(fs_type, flags, data, binderfs_fill_super); + +/* + sget_userns function is removed in kernel version higher than 5.3 + This will fail the compiling of binderfs, since currently used binderfs is + backport from kernel 4.21 + In the latest binderfs code, these codes are changed too. 
+ So, this is a hotfix which enables CIC on kernels higher than 5.3.0 + (TODO) Update binderfs code */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + return mount_nodev(fs_type, flags, data, binderfs_fill_super); + +#else + struct super_block *sb; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + + if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super, + flags, ipc_ns->user_ns, ipc_ns); + if (IS_ERR(sb)) + return ERR_CAST(sb); + + if (!sb->s_root) { + int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0); + if (ret) { + deactivate_locked_super(sb); + return ERR_PTR(ret); + } + + sb->s_flags |= SB_ACTIVE; + } + + return dget(sb->s_root); + +#endif } static void binderfs_kill_super(struct super_block *sb) { struct binderfs_info *info = sb->s_fs_info; - kill_litter_super(sb); - if (info && info->ipc_ns) put_ipc_ns(info->ipc_ns); kfree(info); + kill_litter_super(sb); } static struct file_system_type binder_fs_type = { @@ -550,7 +535,7 @@ .fs_flags = FS_USERNS_MOUNT, }; -int __init init_binderfs(void) +static int __init init_binderfs(void) { int ret; @@ -566,5 +551,16 @@ int __init init_binderfs(void) return ret; } + binderfs_mnt = kern_mount(&binder_fs_type); + if (IS_ERR(binderfs_mnt)) { + ret = PTR_ERR(binderfs_mnt); + binderfs_mnt = NULL; + unregister_filesystem(&binder_fs_type); + unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); + } + return ret; } + +module_init(init_binderfs); +MODULE_LICENSE("GPL v2"); diff --git a/binder/deps.c b/binder/deps.c index dfc14a6..b1a108c 100644 --- a/binder/deps.c +++ b/binder/deps.c @@ -1,4 +1,15 @@ -#include "deps.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) @@ -20,8 +31,6 @@ static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, stru static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL; static void (*mmput_async_ptr)(struct mm_struct *) = NULL; static void (*put_ipc_ns_ptr)(struct ipc_namespace *) = NULL; -static int (*task_work_add_ptr)(struct task_struct *, struct callback_head *, bool) = NULL; -static struct ipc_namespace *(*show_init_ipc_ns_ptr)(void) = NULL; struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { @@ -136,115 +145,17 @@ void mmput_async(struct mm_struct *mm) mmput_async_ptr(mm); } -void put_ipc_ns(struct ipc_namespace *ns) -{ - if (!put_ipc_ns_ptr) - put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns"); - if(put_ipc_ns_ptr) - put_ipc_ns_ptr(ns); -} - -int task_work_add(struct task_struct *task, struct callback_head *twork, bool notify) -{ - if(!task_work_add_ptr) - task_work_add_ptr = kallsyms_lookup_name("task_work_add"); - if(task_work_add_ptr) - return task_work_add_ptr(task, twork, notify); - else - return -1; -} - -struct ipc_namespace *show_init_ipc_ns_compat(void) -{ - if(!show_init_ipc_ns_ptr) - show_init_ipc_ns_ptr = kallsyms_lookup_name("show_init_ipc_ns"); - if(show_init_ipc_ns_ptr) - return show_init_ipc_ns_ptr(); - else - return NULL; -} - -static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) -{ - __clear_bit(fd, fdt->open_fds); - __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); -} -
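[Editor's aside, not part of the patch] The wrappers kept in deps.c all follow one shape: the kernel symbol they need is not exported to modules, so the wrapper resolves it once at runtime with kallsyms_lookup_name(), caches the function pointer, and calls through it on every use. A minimal sketch of that pattern follows, assuming kallsyms_lookup_name() is itself still callable from modules (newer kernels no longer export it); the symbol name example_unexported_fn and its prototype are made up for illustration.

#include <linux/kallsyms.h>
#include <linux/kernel.h>

/*
 * Illustrative sketch only; "example_unexported_fn" is hypothetical.
 * The real wrappers in deps.c (get_vm_area(), put_ipc_ns(), ...) have
 * exactly this shape.
 */
static void (*example_fn_ptr)(void *arg) = NULL;

static void call_example_fn(void *arg)
{
	if (!example_fn_ptr)
		example_fn_ptr =
			(void (*)(void *))kallsyms_lookup_name("example_unexported_fn");
	if (example_fn_ptr)
		example_fn_ptr(arg);	/* quietly a no-op if the lookup failed */
}

The cast is shown here for clarity; the wrappers in this patch assign the unsigned long return value directly, which is why the module Makefiles pass -Wno-int-conversion.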
-static void __put_unused_fd(struct files_struct *files, unsigned int fd) -{ - struct fdtable *fdt = files_fdtable(files); - __clear_open_fd(fd, fdt); - if (fd < files->next_fd) - files->next_fd = fd; -} - -static int close_fd_get_file_backport(unsigned int fd, struct file **res) +bool is_binderfs_device(const struct inode *inode) { - struct files_struct *files = current->files; - struct file *file; - struct fdtable *fdt; + if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) + return true; - spin_lock(&files->file_lock); - fdt = files_fdtable(files); - if (fd >= fdt->max_fds) - goto out_unlock; - file = fdt->fd[fd]; - if (!file) - goto out_unlock; - rcu_assign_pointer(fdt->fd[fd], NULL); - __put_unused_fd(files, fd); - spin_unlock(&files->file_lock); - get_file(file); - *res = file; - return filp_close(file, files); - -out_unlock: - spin_unlock(&files->file_lock); - *res = NULL; - return -ENOENT; -} - -int __close_fd_get_file_compat(unsigned int fd, struct file **res) -{ - int (*close_fd_get_file_ptr)(unsigned int fd, struct file **res) = NULL; - - close_fd_get_file_ptr = kallsyms_lookup_name("__close_fd_get_file"); - if(close_fd_get_file_ptr) - return close_fd_get_file_ptr(fd, res); - else - return close_fd_get_file_backport(fd, res); -} - -struct ipc_namespace *get_ipc_ns_exported_compat(struct ipc_namespace *ns) -{ - struct ipc_namespace *(*get_ipc_ns_exported_ptr)(struct ipc_namespace *ns) = NULL; - - get_ipc_ns_exported_ptr = kallsyms_lookup_name("get_ipc_ns_exported"); - if(get_ipc_ns_exported_ptr) - return get_ipc_ns_exported_ptr(ns); - else - return get_ipc_ns(ns); + return false; } -int ida_alloc_max_compat(struct ida *ida, unsigned int max, gfp_t gfp) -{ - int (*ida_alloc_max_ptr)(struct ida *, unsigned int, gfp_t) = NULL; - - ida_alloc_max_ptr = kallsyms_lookup_name("ida_alloc_max"); - if(ida_alloc_max_ptr) - return ida_alloc_max_ptr(ida, max, gfp); - else - return ida_simple_get(ida, 0, max, gfp); - -} - -void ida_free_compat(struct ida *ida, unsigned int id) +void put_ipc_ns(struct ipc_namespace *ns) { - void (*ida_free_ptr)(struct ida *, unsigned int) = NULL; - ida_free_ptr = kallsyms_lookup_name("ida_free"); - - if(ida_free_ptr) - return ida_free_ptr(ida, id); - else - return ida_simple_remove(ida, id); + if (!put_ipc_ns_ptr) + put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns"); + put_ipc_ns_ptr(ns); } diff --git a/binder/deps.h b/binder/deps.h deleted file mode 100644 index 2665d3e..0000000 --- a/binder/deps.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _DEPS_H -#define _DEPS_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) - void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size); -#else - void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details); -#endif -int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); -struct files_struct *get_files_struct(struct task_struct *task); -void put_files_struct(struct files_struct *files); -struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags); -int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags); -void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); -int __close_fd(struct files_struct *files, unsigned int 
fd); -int can_nice(const struct task_struct *p, const int nice); -int security_binder_set_context_mgr(struct task_struct *mgr); -int security_binder_transaction(struct task_struct *from, struct task_struct *to); -int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to); -int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file); -void mmput_async(struct mm_struct *mm); -void put_ipc_ns(struct ipc_namespace *ns); -int task_work_add(struct task_struct *task, struct callback_head *twork, bool notify); -int __close_fd_get_file_compat(unsigned int fd, struct file **res); -struct ipc_namespace *show_init_ipc_ns_compat(void); -struct ipc_namespace *get_ipc_ns_exported_compat(struct ipc_namespace *ns); -int ida_alloc_max_compat(struct ida *ida, unsigned int max, gfp_t gfp); -void ida_free_compat(struct ida *, unsigned int id); - -#endif From 14f8fb10f22b56bf2399a29955261d0aebb78a88 Mon Sep 17 00:00:00 2001 From: Fengqian Gao Date: Thu, 5 Mar 2020 21:32:13 -0500 Subject: [PATCH 7/8] Add compilation/linking flags for security reason Security flags added and remove mac80211_hwsim folder Tracked-On: OAM-90318 --- ashmem/Makefile | 20 +- binder/Makefile | 26 +- mac80211_hwsim/Makefile | 6 - mac80211_hwsim/mac80211_hwsim.c | 3590 ------------------------------- mac80211_hwsim/mac80211_hwsim.h | 174 -- 5 files changed, 39 insertions(+), 3777 deletions(-) delete mode 100644 mac80211_hwsim/Makefile delete mode 100644 mac80211_hwsim/mac80211_hwsim.c delete mode 100644 mac80211_hwsim/mac80211_hwsim.h diff --git a/ashmem/Makefile b/ashmem/Makefile index 1193713..8d089fc 100644 --- a/ashmem/Makefile +++ b/ashmem/Makefile @@ -1,4 +1,18 @@ -ccflags-y += -I$(src) -Wno-error=implicit-int -Wno-int-conversion +EXTRA_CFLAGS += -I$(src) -Wno-error=implicit-int -Wno-int-conversion +EXTRA_CFLAGS += -Wall -Wformat -Wformat-security -Werror=format-security -D_FORTIFY_SOURCE=2 -Wl,-z,relro -Wl,-z,now -Wl,-z,noexecstack + +GCCVERBG8 := $(shell expr `gcc --version | grep ^gcc | cut -d " " -f 4` \>= 8.0) +GCCVERBG4 := $(shell expr `gcc --version | grep ^gcc | cut -d " " -f 4` \>= 4.9) + +ifeq "$(GCCVERBG8)" "1" + EXTRA_CFLAGS += -fstack-clash-protection -fcf-protection=full +endif + +ifeq "$(GCCVERBG4)" "1" + EXTRA_CFLAGS += -fstack-protector-strong +endif + + obj-m := ashmem_module.o ashmem_module-y := deps.o ashmem.o @@ -8,7 +22,7 @@ all: $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD install: - cp ashmem_module.ko $(DESTDIR)/ + insmod ashmem_module.ko clean: - rm -rf deps.h *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions + rm -rf deps.h *.o *.mod.c *.symvers *.order .*.cmd .tmp_versions diff --git a/binder/Makefile b/binder/Makefile index b4ac7d8..f1f5373 100644 --- a/binder/Makefile +++ b/binder/Makefile @@ -1,4 +1,24 @@ -ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"hostbinder,hostvndbinder,hosthwbinder\"" -DCONFIG_ANDROID_BINDERFS +EXTRA_CFLAGS += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types +EXTRA_CFLAGS += -Wall -Wformat -Wformat-security -Werror=format-security -D_FORTIFY_SOURCE=2 -Wl,-z,relro -Wl,-z,now -Wl,-z,noexecstack + +GCCVERBG8 := $(shell expr `gcc --version | grep ^gcc | cut -d " " -f 4` \>= 8.0) +GCCVERBG4 := $(shell expr `gcc --version | grep ^gcc | cut -d " " -f 4` \>= 4.9) + +ifeq "$(GCCVERBG8)" "1" + EXTRA_CFLAGS += -fstack-clash-protection -fcf-protection=full +endif + +ifeq "$(GCCVERBG4)" "1" + EXTRA_CFLAGS += -fstack-protector-strong +endif 
+ +ifndef CONFIG_ANDROID_BINDER_DEVICES + EXTRA_CFLAGS += -DCONFIG_ANDROID_BINDER_DEVICES="\"hostbinder,hostvndbinder,hosthwbinder\"" +endif +ifndef CONFIG_ANDROID_BINDERFS + EXTRA_CFLAGS += -DCONFIG_ANDROID_BINDERFS +endif + obj-m := binderfs_module.o binder_module.o binder_module-y := deps.o binder.o binder_alloc.o binderfs_module-y := deps.o binderfs.o @@ -9,9 +29,7 @@ all: $(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD install: - cp binder_module.ko $(DESTDIR)/ - cp binderfs_module.ko $(DESTDIR)/ insmod binder_module.ko insmod binderfs_module.ko clean: - rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions *.ur-safe + rm -rf *.o *.mod.c *.symvers *.order .*.cmd .tmp_versions *.ur-safe diff --git a/mac80211_hwsim/Makefile b/mac80211_hwsim/Makefile deleted file mode 100644 index 16b33f7..0000000 --- a/mac80211_hwsim/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -obj-m := mac80211_hwsim.o - -all: - $(MAKE) -C "/lib/modules/$(shell uname -r)/build" M=$(shell pwd) modules -clean: - rm -rf deps.h *.o *.o.ur-safe *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions diff --git a/mac80211_hwsim/mac80211_hwsim.c b/mac80211_hwsim/mac80211_hwsim.c deleted file mode 100644 index 6467ffa..0000000 --- a/mac80211_hwsim/mac80211_hwsim.c +++ /dev/null @@ -1,3590 +0,0 @@ -/* - * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 - * Copyright (c) 2008, Jouni Malinen - * Copyright (c) 2011, Javier Lopez - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * TODO: - * - Add TSF sync and fix IBSS beacon transmission by adding - * competition for "air time" at TBTT - * - RX filtering based on filter configuration (data->rx_filter) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "mac80211_hwsim.h" - -#define WARN_QUEUE 100 -#define MAX_QUEUE 200 - -MODULE_AUTHOR("Jouni Malinen"); -MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); -MODULE_LICENSE("GPL"); - -static int radios = 2; -module_param(radios, int, 0444); -MODULE_PARM_DESC(radios, "Number of simulated radios"); - -static int channels = 1; -module_param(channels, int, 0444); -MODULE_PARM_DESC(channels, "Number of concurrent channels"); - -static bool paged_rx = false; -module_param(paged_rx, bool, 0644); -MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); - -static bool rctbl = false; -module_param(rctbl, bool, 0444); -MODULE_PARM_DESC(rctbl, "Handle rate control table"); - -static bool support_p2p_device = true; -module_param(support_p2p_device, bool, 0444); -MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type"); - -/** - * enum hwsim_regtest - the type of regulatory tests we offer - * - * These are the different values you can use for the regtest - * module parameter. This is useful to help test world roaming - * and the driver regulatory_hint() call and combinations of these. - * If you want to do specific alpha2 regulatory domain tests simply - * use the userspace regulatory request as that will be respected as - * well without the need of this module parameter. This is designed - * only for testing the driver regulatory request, world roaming - * and all possible combinations. - * - * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, - * this is the default value. 
- * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory - * hint, only one driver regulatory hint will be sent as such the - * secondary radios are expected to follow. - * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory - * request with all radios reporting the same regulatory domain. - * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling - * different regulatory domains requests. Expected behaviour is for - * an intersection to occur but each device will still use their - * respective regulatory requested domains. Subsequent radios will - * use the resulting intersection. - * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish - * this by using a custom beacon-capable regulatory domain for the first - * radio. All other device world roam. - * @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory - * domain requests. All radios will adhere to this custom world regulatory - * domain. - * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing 2 custom world regulatory - * domain requests. The first radio will adhere to the first custom world - * regulatory domain, the second one to the second custom world regulatory - * domain. All other devices will world roam. - * @HWSIM_REGTEST_STRICT_FOLLOW_: Used for testing strict regulatory domain - * settings, only the first radio will send a regulatory domain request - * and use strict settings. The rest of the radios are expected to follow. - * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain - * settings. All radios will adhere to this. - * @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory - * domain settings, combined with secondary driver regulatory domain - * settings. The first radio will get a strict regulatory domain setting - * using the first driver regulatory request and the second radio will use - * non-strict settings using the second driver regulatory request. All - * other devices should follow the intersection created between the - * first two. - * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need - * at least 6 radios for a complete test. We will test in this order: - * 1 - driver custom world regulatory domain - * 2 - second custom world regulatory domain - * 3 - first driver regulatory domain request - * 4 - second driver regulatory domain request - * 5 - strict regulatory domain settings using the third driver regulatory - * domain request - * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio - * regulatory requests. 
- */ -enum hwsim_regtest { - HWSIM_REGTEST_DISABLED = 0, - HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1, - HWSIM_REGTEST_DRIVER_REG_ALL = 2, - HWSIM_REGTEST_DIFF_COUNTRY = 3, - HWSIM_REGTEST_WORLD_ROAM = 4, - HWSIM_REGTEST_CUSTOM_WORLD = 5, - HWSIM_REGTEST_CUSTOM_WORLD_2 = 6, - HWSIM_REGTEST_STRICT_FOLLOW = 7, - HWSIM_REGTEST_STRICT_ALL = 8, - HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9, - HWSIM_REGTEST_ALL = 10, -}; - -/* Set to one of the HWSIM_REGTEST_* values above */ -static int regtest = HWSIM_REGTEST_DISABLED; -module_param(regtest, int, 0444); -MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run"); - -static const char *hwsim_alpha2s[] = { - "FI", - "AL", - "US", - "DE", - "JP", - "AL", -}; - -static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = { - .n_reg_rules = 4, - .alpha2 = "99", - .reg_rules = { - REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), - REG_RULE(2484-10, 2484+10, 40, 0, 20, 0), - REG_RULE(5150-10, 5240+10, 40, 0, 30, 0), - REG_RULE(5745-10, 5825+10, 40, 0, 30, 0), - } -}; - -static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = { - .n_reg_rules = 2, - .alpha2 = "99", - .reg_rules = { - REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), - REG_RULE(5725-10, 5850+10, 40, 0, 30, - NL80211_RRF_NO_IR), - } -}; - -static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = { - &hwsim_world_regdom_custom_01, - &hwsim_world_regdom_custom_02, -}; - -struct hwsim_vif_priv { - u32 magic; - u8 bssid[ETH_ALEN]; - bool assoc; - bool bcn_en; - u16 aid; -}; - -#define HWSIM_VIF_MAGIC 0x69537748 - -static inline void hwsim_check_magic(struct ieee80211_vif *vif) -{ - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - WARN(vp->magic != HWSIM_VIF_MAGIC, - "Invalid VIF (%p) magic %#x, %pM, %d/%d\n", - vif, vp->magic, vif->addr, vif->type, vif->p2p); -} - -static inline void hwsim_set_magic(struct ieee80211_vif *vif) -{ - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - vp->magic = HWSIM_VIF_MAGIC; -} - -static inline void hwsim_clear_magic(struct ieee80211_vif *vif) -{ - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - vp->magic = 0; -} - -struct hwsim_sta_priv { - u32 magic; -}; - -#define HWSIM_STA_MAGIC 0x6d537749 - -static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) -{ - struct hwsim_sta_priv *sp = (void *)sta->drv_priv; - WARN_ON(sp->magic != HWSIM_STA_MAGIC); -} - -static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) -{ - struct hwsim_sta_priv *sp = (void *)sta->drv_priv; - sp->magic = HWSIM_STA_MAGIC; -} - -static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) -{ - struct hwsim_sta_priv *sp = (void *)sta->drv_priv; - sp->magic = 0; -} - -struct hwsim_chanctx_priv { - u32 magic; -}; - -#define HWSIM_CHANCTX_MAGIC 0x6d53774a - -static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c) -{ - struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; - WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC); -} - -static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c) -{ - struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; - cp->magic = HWSIM_CHANCTX_MAGIC; -} - -static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) -{ - struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; - cp->magic = 0; -} - -static unsigned int hwsim_net_id; - -static int hwsim_netgroup; - -struct hwsim_net { - int netgroup; - u32 wmediumd; -}; - -static inline int hwsim_net_get_netgroup(struct net *net) -{ - struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); - - 
return hwsim_net->netgroup; -} - -static inline void hwsim_net_set_netgroup(struct net *net) -{ - struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); - - hwsim_net->netgroup = hwsim_netgroup++; -} - -static inline u32 hwsim_net_get_wmediumd(struct net *net) -{ - struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); - - return hwsim_net->wmediumd; -} - -static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid) -{ - struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); - - hwsim_net->wmediumd = portid; -} - -static struct class *hwsim_class; - -static struct net_device *hwsim_mon; /* global monitor netdev */ - -#define CHAN2G(_freq) { \ - .band = NL80211_BAND_2GHZ, \ - .center_freq = (_freq), \ - .hw_value = (_freq), \ - .max_power = 20, \ -} - -#define CHAN5G(_freq) { \ - .band = NL80211_BAND_5GHZ, \ - .center_freq = (_freq), \ - .hw_value = (_freq), \ - .max_power = 20, \ -} - -static const struct ieee80211_channel hwsim_channels_2ghz[] = { - CHAN2G(2412), /* Channel 1 */ - CHAN2G(2417), /* Channel 2 */ - CHAN2G(2422), /* Channel 3 */ - CHAN2G(2427), /* Channel 4 */ - CHAN2G(2432), /* Channel 5 */ - CHAN2G(2437), /* Channel 6 */ - CHAN2G(2442), /* Channel 7 */ - CHAN2G(2447), /* Channel 8 */ - CHAN2G(2452), /* Channel 9 */ - CHAN2G(2457), /* Channel 10 */ - CHAN2G(2462), /* Channel 11 */ - CHAN2G(2467), /* Channel 12 */ - CHAN2G(2472), /* Channel 13 */ - CHAN2G(2484), /* Channel 14 */ -}; - -static const struct ieee80211_channel hwsim_channels_5ghz[] = { - CHAN5G(5180), /* Channel 36 */ - CHAN5G(5200), /* Channel 40 */ - CHAN5G(5220), /* Channel 44 */ - CHAN5G(5240), /* Channel 48 */ - - CHAN5G(5260), /* Channel 52 */ - CHAN5G(5280), /* Channel 56 */ - CHAN5G(5300), /* Channel 60 */ - CHAN5G(5320), /* Channel 64 */ - - CHAN5G(5500), /* Channel 100 */ - CHAN5G(5520), /* Channel 104 */ - CHAN5G(5540), /* Channel 108 */ - CHAN5G(5560), /* Channel 112 */ - CHAN5G(5580), /* Channel 116 */ - CHAN5G(5600), /* Channel 120 */ - CHAN5G(5620), /* Channel 124 */ - CHAN5G(5640), /* Channel 128 */ - CHAN5G(5660), /* Channel 132 */ - CHAN5G(5680), /* Channel 136 */ - CHAN5G(5700), /* Channel 140 */ - - CHAN5G(5745), /* Channel 149 */ - CHAN5G(5765), /* Channel 153 */ - CHAN5G(5785), /* Channel 157 */ - CHAN5G(5805), /* Channel 161 */ - CHAN5G(5825), /* Channel 165 */ - CHAN5G(5845), /* Channel 169 */ -}; - -static const struct ieee80211_rate hwsim_rates[] = { - { .bitrate = 10 }, - { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, - { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, - { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, - { .bitrate = 60 }, - { .bitrate = 90 }, - { .bitrate = 120 }, - { .bitrate = 180 }, - { .bitrate = 240 }, - { .bitrate = 360 }, - { .bitrate = 480 }, - { .bitrate = 540 } -}; - -#define OUI_QCA 0x001374 -#define QCA_NL80211_SUBCMD_TEST 1 -enum qca_nl80211_vendor_subcmds { - QCA_WLAN_VENDOR_ATTR_TEST = 8, - QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST -}; - -static const struct nla_policy -hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = { - [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 }, -}; - -static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, - struct wireless_dev *wdev, - const void *data, int data_len) -{ - struct sk_buff *skb; - struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; - int err; - u32 val; - - err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, - hwsim_vendor_test_policy, NULL); - if (err) - return err; - if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) - return 
-EINVAL; - val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]); - wiphy_debug(wiphy, "%s: test=%u\n", __func__, val); - - /* Send a vendor event as a test. Note that this would not normally be - * done within a command handler, but rather, based on some other - * trigger. For simplicity, this command is used to trigger the event - * here. - * - * event_idx = 0 (index in mac80211_hwsim_vendor_commands) - */ - skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL); - if (skb) { - /* skb_put() or nla_put() will fill up data within - * NL80211_ATTR_VENDOR_DATA. - */ - - /* Add vendor data */ - nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); - - /* Send the event - this will call nla_nest_end() */ - cfg80211_vendor_event(skb, GFP_KERNEL); - } - - /* Send a response to the command */ - skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10); - if (!skb) - return -ENOMEM; - - /* skb_put() or nla_put() will fill up data within - * NL80211_ATTR_VENDOR_DATA - */ - nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2); - - return cfg80211_vendor_cmd_reply(skb); -} - -static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = { - { - .info = { .vendor_id = OUI_QCA, - .subcmd = QCA_NL80211_SUBCMD_TEST }, - .flags = WIPHY_VENDOR_CMD_NEED_NETDEV, - .doit = mac80211_hwsim_vendor_cmd_test, - } -}; - -/* Advertise support vendor specific events */ -static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { - { .vendor_id = OUI_QCA, .subcmd = 1 }, -}; - -static const struct ieee80211_iface_limit hwsim_if_limits[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, - { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) }, - /* must be last, see hwsim_if_comb */ - { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } -}; - -static const struct ieee80211_iface_combination hwsim_if_comb[] = { - { - .limits = hwsim_if_limits, - /* remove the last entry which is P2P_DEVICE */ - .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1, - .max_interfaces = 2048, - .num_different_channels = 1, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160), - }, -}; - -static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { - { - .limits = hwsim_if_limits, - .n_limits = ARRAY_SIZE(hwsim_if_limits), - .max_interfaces = 2048, - .num_different_channels = 1, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160), - }, -}; - -static spinlock_t hwsim_radio_lock; -static LIST_HEAD(hwsim_radios); -static int hwsim_radio_idx; - -static struct platform_driver mac80211_hwsim_driver = { - .driver = { - .name = "mac80211_hwsim", - }, -}; - -struct mac80211_hwsim_data { - struct list_head list; - struct ieee80211_hw *hw; - struct device *dev; - struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; - struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; - struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; - struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; - struct ieee80211_iface_combination if_combination; - - struct mac_address addresses[2]; - int channels, idx; - bool use_chanctx; - bool destroy_on_close; - struct work_struct 
destroy_work; - u32 portid; - char alpha2[2]; - const struct ieee80211_regdomain *regd; - - struct ieee80211_channel *tmp_chan; - struct ieee80211_channel *roc_chan; - u32 roc_duration; - struct delayed_work roc_start; - struct delayed_work roc_done; - struct delayed_work hw_scan; - struct cfg80211_scan_request *hw_scan_request; - struct ieee80211_vif *hw_scan_vif; - int scan_chan_idx; - u8 scan_addr[ETH_ALEN]; - struct { - struct ieee80211_channel *channel; - unsigned long next_start, start, end; - } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) + - ARRAY_SIZE(hwsim_channels_5ghz)]; - - struct ieee80211_channel *channel; - u64 beacon_int /* beacon interval in us */; - unsigned int rx_filter; - bool started, idle, scanning; - struct mutex mutex; - struct tasklet_hrtimer beacon_timer; - enum ps_mode { - PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL - } ps; - bool ps_poll_pending; - struct dentry *debugfs; - - uintptr_t pending_cookie; - struct sk_buff_head pending; /* packets pending */ - /* - * Only radios in the same group can communicate together (the - * channel has to match too). Each bit represents a group. A - * radio can be in more than one group. - */ - u64 group; - - /* group shared by radios created in the same netns */ - int netgroup; - /* wmediumd portid responsible for netgroup of this radio */ - u32 wmediumd; - - /* difference between this hw's clock and the real clock, in usecs */ - s64 tsf_offset; - s64 bcn_delta; - /* absolute beacon transmission time. Used to cover up "tx" delay. */ - u64 abs_bcn_ts; - - /* Stats */ - u64 tx_pkts; - u64 rx_pkts; - u64 tx_bytes; - u64 rx_bytes; - u64 tx_dropped; - u64 tx_failed; -}; - - -struct hwsim_radiotap_hdr { - struct ieee80211_radiotap_header hdr; - __le64 rt_tsft; - u8 rt_flags; - u8 rt_rate; - __le16 rt_channel; - __le16 rt_chbitmask; -} __packed; - -struct hwsim_radiotap_ack_hdr { - struct ieee80211_radiotap_header hdr; - u8 rt_flags; - u8 pad; - __le16 rt_channel; - __le16 rt_chbitmask; -} __packed; - -/* MAC80211_HWSIM netlink family */ -static struct genl_family hwsim_genl_family; - -enum hwsim_multicast_groups { - HWSIM_MCGRP_CONFIG, -}; - -static const struct genl_multicast_group hwsim_mcgrps[] = { - [HWSIM_MCGRP_CONFIG] = { .name = "config", }, -}; - -/* MAC80211_HWSIM netlink policy */ - -static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { - [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, - [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, - [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY, - .len = IEEE80211_MAX_DATA_LEN }, - [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 }, - [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 }, - [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 }, - [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC, - .len = IEEE80211_TX_MAX_RATES * - sizeof(struct hwsim_tx_rate)}, - [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 }, - [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 }, - [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, - [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 }, - [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, - [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, - [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG }, - [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG }, - [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, - [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, - [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, -}; - -static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_channel *chan); - 
-/* sysfs attributes */ -static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *data = dat; - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - struct sk_buff *skb; - struct ieee80211_pspoll *pspoll; - - if (!vp->assoc) - return; - - wiphy_debug(data->hw->wiphy, - "%s: send PS-Poll to %pM for aid %d\n", - __func__, vp->bssid, vp->aid); - - skb = dev_alloc_skb(sizeof(*pspoll)); - if (!skb) - return; - pspoll = skb_put(skb, sizeof(*pspoll)); - pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | - IEEE80211_STYPE_PSPOLL | - IEEE80211_FCTL_PM); - pspoll->aid = cpu_to_le16(0xc000 | vp->aid); - memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); - memcpy(pspoll->ta, mac, ETH_ALEN); - - rcu_read_lock(); - mac80211_hwsim_tx_frame(data->hw, skb, - rcu_dereference(vif->chanctx_conf)->def.chan); - rcu_read_unlock(); -} - -static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, - struct ieee80211_vif *vif, int ps) -{ - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - - if (!vp->assoc) - return; - - wiphy_debug(data->hw->wiphy, - "%s: send data::nullfunc to %pM ps=%d\n", - __func__, vp->bssid, ps); - - skb = dev_alloc_skb(sizeof(*hdr)); - if (!skb) - return; - hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); - hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | - IEEE80211_STYPE_NULLFUNC | - (ps ? IEEE80211_FCTL_PM : 0)); - hdr->duration_id = cpu_to_le16(0); - memcpy(hdr->addr1, vp->bssid, ETH_ALEN); - memcpy(hdr->addr2, mac, ETH_ALEN); - memcpy(hdr->addr3, vp->bssid, ETH_ALEN); - - rcu_read_lock(); - mac80211_hwsim_tx_frame(data->hw, skb, - rcu_dereference(vif->chanctx_conf)->def.chan); - rcu_read_unlock(); -} - - -static void hwsim_send_nullfunc_ps(void *dat, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *data = dat; - hwsim_send_nullfunc(data, mac, vif, 1); -} - -static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *data = dat; - hwsim_send_nullfunc(data, mac, vif, 0); -} - -static int hwsim_fops_ps_read(void *dat, u64 *val) -{ - struct mac80211_hwsim_data *data = dat; - *val = data->ps; - return 0; -} - -static int hwsim_fops_ps_write(void *dat, u64 val) -{ - struct mac80211_hwsim_data *data = dat; - enum ps_mode old_ps; - - if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL && - val != PS_MANUAL_POLL) - return -EINVAL; - - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); - if (val == PS_MANUAL_POLL) { - ieee80211_iterate_active_interfaces_atomic( - data->hw, IEEE80211_IFACE_ITER_NORMAL, - hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { - ieee80211_iterate_active_interfaces_atomic( - data->hw, IEEE80211_IFACE_ITER_NORMAL, - hwsim_send_nullfunc_ps, data); - } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { - ieee80211_iterate_active_interfaces_atomic( - data->hw, IEEE80211_IFACE_ITER_NORMAL, - hwsim_send_nullfunc_no_ps, data); - } - local_bh_enable(); - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write, - "%llu\n"); - -static int hwsim_write_simulate_radar(void *dat, u64 val) -{ - struct mac80211_hwsim_data *data = dat; - - ieee80211_radar_detected(data->hw); - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL, - hwsim_write_simulate_radar, "%llu\n"); - -static int hwsim_fops_group_read(void *dat, u64 *val) -{ - 
struct mac80211_hwsim_data *data = dat; - *val = data->group; - return 0; -} - -static int hwsim_fops_group_write(void *dat, u64 val) -{ - struct mac80211_hwsim_data *data = dat; - data->group = val; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group, - hwsim_fops_group_read, hwsim_fops_group_write, - "%llx\n"); - -static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - /* TODO: allow packet injection */ - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static inline u64 mac80211_hwsim_get_tsf_raw(void) -{ - return ktime_to_us(ktime_get_real()); -} - -static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data) -{ - u64 now = mac80211_hwsim_get_tsf_raw(); - return cpu_to_le64(now + data->tsf_offset); -} - -static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *data = hw->priv; - return le64_to_cpu(__mac80211_hwsim_get_tsf(data)); -} - -static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u64 tsf) -{ - struct mac80211_hwsim_data *data = hw->priv; - u64 now = mac80211_hwsim_get_tsf(hw, vif); - u32 bcn_int = data->beacon_int; - u64 delta = abs(tsf - now); - - /* adjust after beaconing with new timestamp at old TBTT */ - if (tsf > now) { - data->tsf_offset += delta; - data->bcn_delta = do_div(delta, bcn_int); - } else { - data->tsf_offset -= delta; - data->bcn_delta = -(s64)do_div(delta, bcn_int); - } -} - -static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, - struct sk_buff *tx_skb, - struct ieee80211_channel *chan) -{ - struct mac80211_hwsim_data *data = hw->priv; - struct sk_buff *skb; - struct hwsim_radiotap_hdr *hdr; - u16 flags; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); - struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); - - if (WARN_ON(!txrate)) - return; - - if (!netif_running(hwsim_mon)) - return; - - skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC); - if (skb == NULL) - return; - - hdr = skb_push(skb, sizeof(*hdr)); - hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; - hdr->hdr.it_pad = 0; - hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); - hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | - (1 << IEEE80211_RADIOTAP_RATE) | - (1 << IEEE80211_RADIOTAP_TSFT) | - (1 << IEEE80211_RADIOTAP_CHANNEL)); - hdr->rt_tsft = __mac80211_hwsim_get_tsf(data); - hdr->rt_flags = 0; - hdr->rt_rate = txrate->bitrate / 5; - hdr->rt_channel = cpu_to_le16(chan->center_freq); - flags = IEEE80211_CHAN_2GHZ; - if (txrate->flags & IEEE80211_RATE_ERP_G) - flags |= IEEE80211_CHAN_OFDM; - else - flags |= IEEE80211_CHAN_CCK; - hdr->rt_chbitmask = cpu_to_le16(flags); - - skb->dev = hwsim_mon; - skb_reset_mac_header(skb); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx(skb); -} - - -static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, - const u8 *addr) -{ - struct sk_buff *skb; - struct hwsim_radiotap_ack_hdr *hdr; - u16 flags; - struct ieee80211_hdr *hdr11; - - if (!netif_running(hwsim_mon)) - return; - - skb = dev_alloc_skb(100); - if (skb == NULL) - return; - - hdr = skb_put(skb, sizeof(*hdr)); - hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; - hdr->hdr.it_pad = 0; - hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); - hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | - (1 << IEEE80211_RADIOTAP_CHANNEL)); - hdr->rt_flags = 0; - hdr->pad = 0; - 
hdr->rt_channel = cpu_to_le16(chan->center_freq); - flags = IEEE80211_CHAN_2GHZ; - hdr->rt_chbitmask = cpu_to_le16(flags); - - hdr11 = skb_put(skb, 10); - hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | - IEEE80211_STYPE_ACK); - hdr11->duration_id = cpu_to_le16(0); - memcpy(hdr11->addr1, addr, ETH_ALEN); - - skb->dev = hwsim_mon; - skb_reset_mac_header(skb); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx(skb); -} - -struct mac80211_hwsim_addr_match_data { - u8 addr[ETH_ALEN]; - bool ret; -}; - -static void mac80211_hwsim_addr_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_addr_match_data *md = data; - - if (memcmp(mac, md->addr, ETH_ALEN) == 0) - md->ret = true; -} - -static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, - const u8 *addr) -{ - struct mac80211_hwsim_addr_match_data md = { - .ret = false, - }; - - if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0) - return true; - - memcpy(md.addr, addr, ETH_ALEN); - - ieee80211_iterate_active_interfaces_atomic(data->hw, - IEEE80211_IFACE_ITER_NORMAL, - mac80211_hwsim_addr_iter, - &md); - - return md.ret; -} - -static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, - struct sk_buff *skb) -{ - switch (data->ps) { - case PS_DISABLED: - return true; - case PS_ENABLED: - return false; - case PS_AUTO_POLL: - /* TODO: accept (some) Beacons by default and other frames only - * if pending PS-Poll has been sent */ - return true; - case PS_MANUAL_POLL: - /* Allow unicast frames to own address if there is a pending - * PS-Poll */ - if (data->ps_poll_pending && - mac80211_hwsim_addr_match(data, skb->data + 4)) { - data->ps_poll_pending = false; - return true; - } - return false; - } - - return true; -} - -static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, - struct sk_buff *skb, int portid) -{ - struct net *net; - bool found = false; - int res = -ENOENT; - - rcu_read_lock(); - for_each_net_rcu(net) { - if (data->netgroup == hwsim_net_get_netgroup(net)) { - res = genlmsg_unicast(net, skb, portid); - found = true; - break; - } - } - rcu_read_unlock(); - - if (!found) - nlmsg_free(skb); - - return res; -} - -static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, - struct sk_buff *my_skb, - int dst_portid) -{ - struct sk_buff *skb; - struct mac80211_hwsim_data *data = hw->priv; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb); - void *msg_head; - unsigned int hwsim_flags = 0; - int i; - struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES]; - uintptr_t cookie; - - if (data->ps != PS_DISABLED) - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); - /* If the queue contains MAX_QUEUE skb's drop some */ - if (skb_queue_len(&data->pending) >= MAX_QUEUE) { - /* Droping until WARN_QUEUE level */ - while (skb_queue_len(&data->pending) >= WARN_QUEUE) { - ieee80211_free_txskb(hw, skb_dequeue(&data->pending)); - data->tx_dropped++; - } - } - - skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); - if (skb == NULL) - goto nla_put_failure; - - msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, - HWSIM_CMD_FRAME); - if (msg_head == NULL) { - printk(KERN_DEBUG "mac80211_hwsim: problem with msg_head\n"); - goto nla_put_failure; - } - - if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, - ETH_ALEN, data->addresses[1].addr)) - goto nla_put_failure; - - /* We get the 
skb->data */ - if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data)) - goto nla_put_failure; - - /* We get the flags for this transmission, and we translate them to - wmediumd flags */ - - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) - hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS; - - if (info->flags & IEEE80211_TX_CTL_NO_ACK) - hwsim_flags |= HWSIM_TX_CTL_NO_ACK; - - if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags)) - goto nla_put_failure; - - if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq)) - goto nla_put_failure; - - /* We get the tx control (rate and retries) info*/ - - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - tx_attempts[i].idx = info->status.rates[i].idx; - tx_attempts[i].count = info->status.rates[i].count; - } - - if (nla_put(skb, HWSIM_ATTR_TX_INFO, - sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, - tx_attempts)) - goto nla_put_failure; - - /* We create a cookie to identify this skb */ - data->pending_cookie++; - cookie = data->pending_cookie; - info->rate_driver_data[0] = (void *)cookie; - if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD)) - goto nla_put_failure; - - genlmsg_end(skb, msg_head); - if (hwsim_unicast_netgroup(data, skb, dst_portid)) - goto err_free_txskb; - - /* Enqueue the packet */ - skb_queue_tail(&data->pending, my_skb); - data->tx_pkts++; - data->tx_bytes += my_skb->len; - return; - -nla_put_failure: - nlmsg_free(skb); -err_free_txskb: - printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); - ieee80211_free_txskb(hw, my_skb); - data->tx_failed++; -} - -static bool hwsim_chans_compat(struct ieee80211_channel *c1, - struct ieee80211_channel *c2) -{ - if (!c1 || !c2) - return false; - - return c1->center_freq == c2->center_freq; -} - -struct tx_iter_data { - struct ieee80211_channel *channel; - bool receive; -}; - -static void mac80211_hwsim_tx_iter(void *_data, u8 *addr, - struct ieee80211_vif *vif) -{ - struct tx_iter_data *data = _data; - - if (!vif->chanctx_conf) - return; - - if (!hwsim_chans_compat(data->channel, - rcu_dereference(vif->chanctx_conf)->def.chan)) - return; - - data->receive = true; -} - -static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb) -{ - /* - * To enable this code, #define the HWSIM_RADIOTAP_OUI, - * e.g. like this: - * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00" - * (but you should use a valid OUI, not that) - * - * If anyone wants to 'donate' a radiotap OUI/subns code - * please send a patch removing this #ifdef and changing - * the values accordingly. - */ -#ifdef HWSIM_RADIOTAP_OUI - struct ieee80211_vendor_radiotap *rtap; - - /* - * Note that this code requires the headroom in the SKB - * that was allocated earlier. - */ - rtap = skb_push(skb, sizeof(*rtap) + 8 + 4); - rtap->oui[0] = HWSIM_RADIOTAP_OUI[0]; - rtap->oui[1] = HWSIM_RADIOTAP_OUI[1]; - rtap->oui[2] = HWSIM_RADIOTAP_OUI[2]; - rtap->subns = 127; - - /* - * Radiotap vendor namespaces can (and should) also be - * split into fields by using the standard radiotap - * presence bitmap mechanism. Use just BIT(0) here for - * the presence bitmap. 
- */ - rtap->present = BIT(0); - /* We have 8 bytes of (dummy) data */ - rtap->len = 8; - /* For testing, also require it to be aligned */ - rtap->align = 8; - /* And also test that padding works, 4 bytes */ - rtap->pad = 4; - /* push the data */ - memcpy(rtap->data, "ABCDEFGH", 8); - /* make sure to clear padding, mac80211 doesn't */ - memset(rtap->data + 8, 0, 4); - - IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; -#endif -} - -static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_channel *chan) -{ - struct mac80211_hwsim_data *data = hw->priv, *data2; - bool ack = false; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_rx_status rx_status; - u64 now; - - memset(&rx_status, 0, sizeof(rx_status)); - rx_status.flag |= RX_FLAG_MACTIME_START; - rx_status.freq = chan->center_freq; - rx_status.band = chan->band; - if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { - rx_status.rate_idx = - ieee80211_rate_get_vht_mcs(&info->control.rates[0]); - rx_status.nss = - ieee80211_rate_get_vht_nss(&info->control.rates[0]); - rx_status.encoding = RX_ENC_VHT; - } else { - rx_status.rate_idx = info->control.rates[0].idx; - if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) - rx_status.encoding = RX_ENC_HT; - } - if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - rx_status.bw = RATE_INFO_BW_40; - else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - rx_status.bw = RATE_INFO_BW_80; - else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) - rx_status.bw = RATE_INFO_BW_160; - else - rx_status.bw = RATE_INFO_BW_20; - if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; - /* TODO: simulate real signal strength (and optional packet loss) */ - rx_status.signal = -50; - if (info->control.vif) - rx_status.signal += info->control.vif->bss_conf.txpower; - - if (data->ps != PS_DISABLED) - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); - - /* release the skb's source info */ - skb_orphan(skb); - skb_dst_drop(skb); - skb->mark = 0; - secpath_reset(skb); - nf_reset(skb); - - /* - * Get absolute mactime here so all HWs RX at the "same time", and - * absolute TX time for beacon mactime so the timestamp matches. - * Giving beacons a different mactime than non-beacons looks messy, but - * it helps the Toffset be exact and a ~10us mactime discrepancy - * probably doesn't really matter. 
- */ - if (ieee80211_is_beacon(hdr->frame_control) || - ieee80211_is_probe_resp(hdr->frame_control)) - now = data->abs_bcn_ts; - else - now = mac80211_hwsim_get_tsf_raw(); - - /* Copy skb to all enabled radios that are on the current frequency */ - spin_lock(&hwsim_radio_lock); - list_for_each_entry(data2, &hwsim_radios, list) { - struct sk_buff *nskb; - struct tx_iter_data tx_iter_data = { - .receive = false, - .channel = chan, - }; - - if (data == data2) - continue; - - if (!data2->started || (data2->idle && !data2->tmp_chan) || - !hwsim_ps_rx_ok(data2, skb)) - continue; - - if (!(data->group & data2->group)) - continue; - - if (data->netgroup != data2->netgroup) - continue; - - if (!hwsim_chans_compat(chan, data2->tmp_chan) && - !hwsim_chans_compat(chan, data2->channel)) { - ieee80211_iterate_active_interfaces_atomic( - data2->hw, IEEE80211_IFACE_ITER_NORMAL, - mac80211_hwsim_tx_iter, &tx_iter_data); - if (!tx_iter_data.receive) - continue; - } - - /* - * reserve some space for our vendor and the normal - * radiotap header, since we're copying anyway - */ - if (skb->len < PAGE_SIZE && paged_rx) { - struct page *page = alloc_page(GFP_ATOMIC); - - if (!page) - continue; - - nskb = dev_alloc_skb(128); - if (!nskb) { - __free_page(page); - continue; - } - - memcpy(page_address(page), skb->data, skb->len); - skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len); - } else { - nskb = skb_copy(skb, GFP_ATOMIC); - if (!nskb) - continue; - } - - if (mac80211_hwsim_addr_match(data2, hdr->addr1)) - ack = true; - - rx_status.mactime = now + data2->tsf_offset; - - memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); - - mac80211_hwsim_add_vendor_rtap(nskb); - - data2->rx_pkts++; - data2->rx_bytes += nskb->len; - ieee80211_rx_irqsafe(data2->hw, nskb); - } - spin_unlock(&hwsim_radio_lock); - - return ack; -} - -static void mac80211_hwsim_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct mac80211_hwsim_data *data = hw->priv; - struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (void *)skb->data; - struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_channel *channel; - bool ack; - u32 _portid; - - if (WARN_ON(skb->len < 10)) { - /* Should not happen; just a sanity check for addr1 use */ - ieee80211_free_txskb(hw, skb); - return; - } - - if (!data->use_chanctx) { - channel = data->channel; - } else if (txi->hw_queue == 4) { - channel = data->tmp_chan; - } else { - chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf); - if (chanctx_conf) - channel = chanctx_conf->def.chan; - else - channel = NULL; - } - - if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) { - ieee80211_free_txskb(hw, skb); - return; - } - - if (data->idle && !data->tmp_chan) { - wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); - ieee80211_free_txskb(hw, skb); - return; - } - - if (txi->control.vif) - hwsim_check_magic(txi->control.vif); - if (control->sta) - hwsim_check_sta_magic(control->sta); - - if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) - ieee80211_get_tx_rates(txi->control.vif, control->sta, skb, - txi->control.rates, - ARRAY_SIZE(txi->control.rates)); - - if (skb->len >= 24 + 8 && - ieee80211_is_probe_resp(hdr->frame_control)) { - /* fake header transmission time */ - struct ieee80211_mgmt *mgmt; - struct ieee80211_rate *txrate; - u64 ts; - - mgmt = (struct ieee80211_mgmt *)skb->data; - txrate = ieee80211_get_tx_rate(hw, txi); - ts = mac80211_hwsim_get_tsf_raw(); - 
mgmt->u.probe_resp.timestamp = - cpu_to_le64(ts + data->tsf_offset + - 24 * 8 * 10 / txrate->bitrate); - } - - mac80211_hwsim_monitor_rx(hw, skb, channel); - - /* wmediumd mode check */ - _portid = ACCESS_ONCE(data->wmediumd); - - if (_portid) - return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); - - /* NO wmediumd detected, perfect medium simulation */ - data->tx_pkts++; - data->tx_bytes += skb->len; - ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); - - if (ack && skb->len >= 16) - mac80211_hwsim_monitor_ack(channel, hdr->addr2); - - ieee80211_tx_info_clear_status(txi); - - /* frame was transmitted at most favorable rate at first attempt */ - txi->control.rates[0].count = 1; - txi->control.rates[1].idx = -1; - - if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) - txi->flags |= IEEE80211_TX_STAT_ACK; - ieee80211_tx_status_irqsafe(hw, skb); -} - - -static int mac80211_hwsim_start(struct ieee80211_hw *hw) -{ - struct mac80211_hwsim_data *data = hw->priv; - wiphy_debug(hw->wiphy, "%s\n", __func__); - data->started = true; - return 0; -} - - -static void mac80211_hwsim_stop(struct ieee80211_hw *hw) -{ - struct mac80211_hwsim_data *data = hw->priv; - data->started = false; - tasklet_hrtimer_cancel(&data->beacon_timer); - wiphy_debug(hw->wiphy, "%s\n", __func__); -} - - -static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", - __func__, ieee80211_vif_type_p2p(vif), - vif->addr); - hwsim_set_magic(vif); - - vif->cab_queue = 0; - vif->hw_queue[IEEE80211_AC_VO] = 0; - vif->hw_queue[IEEE80211_AC_VI] = 1; - vif->hw_queue[IEEE80211_AC_BE] = 2; - vif->hw_queue[IEEE80211_AC_BK] = 3; - - return 0; -} - - -static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype newtype, - bool newp2p) -{ - newtype = ieee80211_iftype_p2p(newtype, newp2p); - wiphy_debug(hw->wiphy, - "%s (old type=%d, new type=%d, mac_addr=%pM)\n", - __func__, ieee80211_vif_type_p2p(vif), - newtype, vif->addr); - hwsim_check_magic(vif); - - /* - * interface may change from non-AP to AP in - * which case this needs to be set up again - */ - vif->cab_queue = 0; - - return 0; -} - -static void mac80211_hwsim_remove_interface( - struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", - __func__, ieee80211_vif_type_p2p(vif), - vif->addr); - hwsim_check_magic(vif); - hwsim_clear_magic(vif); -} - -static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct ieee80211_channel *chan) -{ - struct mac80211_hwsim_data *data = hw->priv; - u32 _pid = ACCESS_ONCE(data->wmediumd); - - if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { - struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); - ieee80211_get_tx_rates(txi->control.vif, NULL, skb, - txi->control.rates, - ARRAY_SIZE(txi->control.rates)); - } - - mac80211_hwsim_monitor_rx(hw, skb, chan); - - if (_pid) - return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); - - mac80211_hwsim_tx_frame_no_nl(hw, skb, chan); - dev_kfree_skb(skb); -} - -static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *data = arg; - struct ieee80211_hw *hw = data->hw; - struct ieee80211_tx_info *info; - struct ieee80211_rate *txrate; - struct ieee80211_mgmt *mgmt; - struct sk_buff *skb; - - hwsim_check_magic(vif); - - if (vif->type != NL80211_IFTYPE_AP && - vif->type != NL80211_IFTYPE_MESH_POINT && - vif->type != 
NL80211_IFTYPE_ADHOC) - return; - - skb = ieee80211_beacon_get(hw, vif); - if (skb == NULL) - return; - info = IEEE80211_SKB_CB(skb); - if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) - ieee80211_get_tx_rates(vif, NULL, skb, - info->control.rates, - ARRAY_SIZE(info->control.rates)); - - txrate = ieee80211_get_tx_rate(hw, info); - - mgmt = (struct ieee80211_mgmt *) skb->data; - /* fake header transmission time */ - data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw(); - mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts + - data->tsf_offset + - 24 * 8 * 10 / txrate->bitrate); - - mac80211_hwsim_tx_frame(hw, skb, - rcu_dereference(vif->chanctx_conf)->def.chan); - - if (vif->csa_active && ieee80211_csa_is_complete(vif)) - ieee80211_csa_finish(vif); -} - -static enum hrtimer_restart -mac80211_hwsim_beacon(struct hrtimer *timer) -{ - struct mac80211_hwsim_data *data = - container_of(timer, struct mac80211_hwsim_data, - beacon_timer.timer); - struct ieee80211_hw *hw = data->hw; - u64 bcn_int = data->beacon_int; - ktime_t next_bcn; - - if (!data->started) - goto out; - - ieee80211_iterate_active_interfaces_atomic( - hw, IEEE80211_IFACE_ITER_NORMAL, - mac80211_hwsim_beacon_tx, data); - - /* beacon at new TBTT + beacon interval */ - if (data->bcn_delta) { - bcn_int -= data->bcn_delta; - data->bcn_delta = 0; - } - - next_bcn = ktime_add(hrtimer_get_expires(timer), - ns_to_ktime(bcn_int * 1000)); - tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS); -out: - return HRTIMER_NORESTART; -} - -static const char * const hwsim_chanwidths[] = { - [NL80211_CHAN_WIDTH_20_NOHT] = "noht", - [NL80211_CHAN_WIDTH_20] = "ht20", - [NL80211_CHAN_WIDTH_40] = "ht40", - [NL80211_CHAN_WIDTH_80] = "vht80", - [NL80211_CHAN_WIDTH_80P80] = "vht80p80", - [NL80211_CHAN_WIDTH_160] = "vht160", -}; - -static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) -{ - struct mac80211_hwsim_data *data = hw->priv; - struct ieee80211_conf *conf = &hw->conf; - static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { - [IEEE80211_SMPS_AUTOMATIC] = "auto", - [IEEE80211_SMPS_OFF] = "off", - [IEEE80211_SMPS_STATIC] = "static", - [IEEE80211_SMPS_DYNAMIC] = "dynamic", - }; - int idx; - - if (conf->chandef.chan) - wiphy_debug(hw->wiphy, - "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n", - __func__, - conf->chandef.chan->center_freq, - conf->chandef.center_freq1, - conf->chandef.center_freq2, - hwsim_chanwidths[conf->chandef.width], - !!(conf->flags & IEEE80211_CONF_IDLE), - !!(conf->flags & IEEE80211_CONF_PS), - smps_modes[conf->smps_mode]); - else - wiphy_debug(hw->wiphy, - "%s (freq=0 idle=%d ps=%d smps=%s)\n", - __func__, - !!(conf->flags & IEEE80211_CONF_IDLE), - !!(conf->flags & IEEE80211_CONF_PS), - smps_modes[conf->smps_mode]); - - data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); - - WARN_ON(conf->chandef.chan && data->use_chanctx); - - mutex_lock(&data->mutex); - if (data->scanning && conf->chandef.chan) { - for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { - if (data->survey_data[idx].channel == data->channel) { - data->survey_data[idx].start = - data->survey_data[idx].next_start; - data->survey_data[idx].end = jiffies; - break; - } - } - - data->channel = conf->chandef.chan; - - for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { - if (data->survey_data[idx].channel && - data->survey_data[idx].channel != data->channel) - continue; - data->survey_data[idx].channel = data->channel; - data->survey_data[idx].next_start = jiffies; - break; - } - } else { - data->channel = 
conf->chandef.chan; - } - mutex_unlock(&data->mutex); - - if (!data->started || !data->beacon_int) - tasklet_hrtimer_cancel(&data->beacon_timer); - else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { - u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); - u32 bcn_int = data->beacon_int; - u64 until_tbtt = bcn_int - do_div(tsf, bcn_int); - - tasklet_hrtimer_start(&data->beacon_timer, - ns_to_ktime(until_tbtt * 1000), - HRTIMER_MODE_REL); - } - - return 0; -} - - -static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags,u64 multicast) -{ - struct mac80211_hwsim_data *data = hw->priv; - - wiphy_debug(hw->wiphy, "%s\n", __func__); - - data->rx_filter = 0; - if (*total_flags & FIF_ALLMULTI) - data->rx_filter |= FIF_ALLMULTI; - - *total_flags = data->rx_filter; -} - -static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - unsigned int *count = data; - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - - if (vp->bcn_en) - (*count)++; -} - -static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, - u32 changed) -{ - struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - struct mac80211_hwsim_data *data = hw->priv; - - hwsim_check_magic(vif); - - wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n", - __func__, changed, vif->addr); - - if (changed & BSS_CHANGED_BSSID) { - wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n", - __func__, info->bssid); - memcpy(vp->bssid, info->bssid, ETH_ALEN); - } - - if (changed & BSS_CHANGED_ASSOC) { - wiphy_debug(hw->wiphy, " ASSOC: assoc=%d aid=%d\n", - info->assoc, info->aid); - vp->assoc = info->assoc; - vp->aid = info->aid; - } - - if (changed & BSS_CHANGED_BEACON_ENABLED) { - wiphy_debug(hw->wiphy, " BCN EN: %d (BI=%u)\n", - info->enable_beacon, info->beacon_int); - vp->bcn_en = info->enable_beacon; - if (data->started && - !hrtimer_is_queued(&data->beacon_timer.timer) && - info->enable_beacon) { - u64 tsf, until_tbtt; - u32 bcn_int; - data->beacon_int = info->beacon_int * 1024; - tsf = mac80211_hwsim_get_tsf(hw, vif); - bcn_int = data->beacon_int; - until_tbtt = bcn_int - do_div(tsf, bcn_int); - tasklet_hrtimer_start(&data->beacon_timer, - ns_to_ktime(until_tbtt * 1000), - HRTIMER_MODE_REL); - } else if (!info->enable_beacon) { - unsigned int count = 0; - ieee80211_iterate_active_interfaces_atomic( - data->hw, IEEE80211_IFACE_ITER_NORMAL, - mac80211_hwsim_bcn_en_iter, &count); - wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", - count); - if (count == 0) { - tasklet_hrtimer_cancel(&data->beacon_timer); - data->beacon_int = 0; - } - } - } - - if (changed & BSS_CHANGED_ERP_CTS_PROT) { - wiphy_debug(hw->wiphy, " ERP_CTS_PROT: %d\n", - info->use_cts_prot); - } - - if (changed & BSS_CHANGED_ERP_PREAMBLE) { - wiphy_debug(hw->wiphy, " ERP_PREAMBLE: %d\n", - info->use_short_preamble); - } - - if (changed & BSS_CHANGED_ERP_SLOT) { - wiphy_debug(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot); - } - - if (changed & BSS_CHANGED_HT) { - wiphy_debug(hw->wiphy, " HT: op_mode=0x%x\n", - info->ht_operation_mode); - } - - if (changed & BSS_CHANGED_BASIC_RATES) { - wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n", - (unsigned long long) info->basic_rates); - } - - if (changed & BSS_CHANGED_TXPOWER) - wiphy_debug(hw->wiphy, " TX Power: %d dBm\n", info->txpower); -} - -static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) 
-{ - hwsim_check_magic(vif); - hwsim_set_sta_magic(sta); - - return 0; -} - -static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - hwsim_check_magic(vif); - hwsim_clear_sta_magic(sta); - - return 0; -} - -static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) -{ - hwsim_check_magic(vif); - - switch (cmd) { - case STA_NOTIFY_SLEEP: - case STA_NOTIFY_AWAKE: - /* TODO: make good use of these flags */ - break; - default: - WARN(1, "Invalid sta notify: %d\n", cmd); - break; - } -} - -static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - bool set) -{ - hwsim_check_sta_magic(sta); - return 0; -} - -static int mac80211_hwsim_conf_tx( - struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - wiphy_debug(hw->wiphy, - "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n", - __func__, queue, - params->txop, params->cw_min, - params->cw_max, params->aifs); - return 0; -} - -static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx, - struct survey_info *survey) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - - if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data)) - return -ENOENT; - - mutex_lock(&hwsim->mutex); - survey->channel = hwsim->survey_data[idx].channel; - if (!survey->channel) { - mutex_unlock(&hwsim->mutex); - return -ENOENT; - } - - /* - * Magically conjured dummy values --- this is only ok for simulated hardware. - * - * A real driver which cannot determine real values noise MUST NOT - * report any, especially not a magically conjured ones :-) - */ - survey->filled = SURVEY_INFO_NOISE_DBM | - SURVEY_INFO_TIME | - SURVEY_INFO_TIME_BUSY; - survey->noise = -92; - survey->time = - jiffies_to_msecs(hwsim->survey_data[idx].end - - hwsim->survey_data[idx].start); - /* report 12.5% of channel time is used */ - survey->time_busy = survey->time/8; - mutex_unlock(&hwsim->mutex); - - return 0; -} - -#ifdef CONFIG_NL80211_TESTMODE -/* - * This section contains example code for using netlink - * attributes with the testmode command in nl80211. 
- */ - -/* These enums need to be kept in sync with userspace */ -enum hwsim_testmode_attr { - __HWSIM_TM_ATTR_INVALID = 0, - HWSIM_TM_ATTR_CMD = 1, - HWSIM_TM_ATTR_PS = 2, - - /* keep last */ - __HWSIM_TM_ATTR_AFTER_LAST, - HWSIM_TM_ATTR_MAX = __HWSIM_TM_ATTR_AFTER_LAST - 1 -}; - -enum hwsim_testmode_cmd { - HWSIM_TM_CMD_SET_PS = 0, - HWSIM_TM_CMD_GET_PS = 1, - HWSIM_TM_CMD_STOP_QUEUES = 2, - HWSIM_TM_CMD_WAKE_QUEUES = 3, -}; - -static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = { - [HWSIM_TM_ATTR_CMD] = { .type = NLA_U32 }, - [HWSIM_TM_ATTR_PS] = { .type = NLA_U32 }, -}; - -static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - void *data, int len) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1]; - struct sk_buff *skb; - int err, ps; - - err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len, - hwsim_testmode_policy, NULL); - if (err) - return err; - - if (!tb[HWSIM_TM_ATTR_CMD]) - return -EINVAL; - - switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) { - case HWSIM_TM_CMD_SET_PS: - if (!tb[HWSIM_TM_ATTR_PS]) - return -EINVAL; - ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]); - return hwsim_fops_ps_write(hwsim, ps); - case HWSIM_TM_CMD_GET_PS: - skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, - nla_total_size(sizeof(u32))); - if (!skb) - return -ENOMEM; - if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps)) - goto nla_put_failure; - return cfg80211_testmode_reply(skb); - case HWSIM_TM_CMD_STOP_QUEUES: - ieee80211_stop_queues(hw); - return 0; - case HWSIM_TM_CMD_WAKE_QUEUES: - ieee80211_wake_queues(hw); - return 0; - default: - return -EOPNOTSUPP; - } - - nla_put_failure: - kfree_skb(skb); - return -ENOBUFS; -} -#endif - -static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) -{ - struct ieee80211_sta *sta = params->sta; - enum ieee80211_ampdu_mlme_action action = params->action; - u16 tid = params->tid; - - switch (action) { - case IEEE80211_AMPDU_TX_START: - ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - break; - case IEEE80211_AMPDU_RX_START: - case IEEE80211_AMPDU_RX_STOP: - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static void mac80211_hwsim_flush(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - u32 queues, bool drop) -{ - /* Not implemented, queues only on kernel side */ -} - -static void hw_scan_work(struct work_struct *work) -{ - struct mac80211_hwsim_data *hwsim = - container_of(work, struct mac80211_hwsim_data, hw_scan.work); - struct cfg80211_scan_request *req = hwsim->hw_scan_request; - int dwell, i; - - mutex_lock(&hwsim->mutex); - if (hwsim->scan_chan_idx >= req->n_channels) { - struct cfg80211_scan_info info = { - .aborted = false, - }; - - wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n"); - ieee80211_scan_completed(hwsim->hw, &info); - hwsim->hw_scan_request = NULL; - hwsim->hw_scan_vif = NULL; - hwsim->tmp_chan = NULL; - mutex_unlock(&hwsim->mutex); - return; - } - - wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n", - req->channels[hwsim->scan_chan_idx]->center_freq); - - hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx]; - if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR | - IEEE80211_CHAN_RADAR) || - !req->n_ssids) { - dwell = 
120; - } else { - dwell = 30; - /* send probes */ - for (i = 0; i < req->n_ssids; i++) { - struct sk_buff *probe; - struct ieee80211_mgmt *mgmt; - - probe = ieee80211_probereq_get(hwsim->hw, - hwsim->scan_addr, - req->ssids[i].ssid, - req->ssids[i].ssid_len, - req->ie_len); - if (!probe) - continue; - - mgmt = (struct ieee80211_mgmt *) probe->data; - memcpy(mgmt->da, req->bssid, ETH_ALEN); - memcpy(mgmt->bssid, req->bssid, ETH_ALEN); - - if (req->ie_len) - skb_put_data(probe, req->ie, req->ie_len); - - local_bh_disable(); - mac80211_hwsim_tx_frame(hwsim->hw, probe, - hwsim->tmp_chan); - local_bh_enable(); - } - } - ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, - msecs_to_jiffies(dwell)); - hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan; - hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies; - hwsim->survey_data[hwsim->scan_chan_idx].end = - jiffies + msecs_to_jiffies(dwell); - hwsim->scan_chan_idx++; - mutex_unlock(&hwsim->mutex); -} - -static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_scan_request *hw_req) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - struct cfg80211_scan_request *req = &hw_req->req; - - mutex_lock(&hwsim->mutex); - if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { - mutex_unlock(&hwsim->mutex); - return -EBUSY; - } - hwsim->hw_scan_request = req; - hwsim->hw_scan_vif = vif; - hwsim->scan_chan_idx = 0; - if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) - get_random_mask_addr(hwsim->scan_addr, - hw_req->req.mac_addr, - hw_req->req.mac_addr_mask); - else - memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN); - memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); - mutex_unlock(&hwsim->mutex); - - wiphy_debug(hw->wiphy, "hwsim hw_scan request\n"); - - ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0); - - return 0; -} - -static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - struct cfg80211_scan_info info = { - .aborted = true, - }; - - wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n"); - - cancel_delayed_work_sync(&hwsim->hw_scan); - - mutex_lock(&hwsim->mutex); - ieee80211_scan_completed(hwsim->hw, &info); - hwsim->tmp_chan = NULL; - hwsim->hw_scan_request = NULL; - hwsim->hw_scan_vif = NULL; - mutex_unlock(&hwsim->mutex); -} - -static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *mac_addr) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - - mutex_lock(&hwsim->mutex); - - if (hwsim->scanning) { - printk(KERN_DEBUG "two hwsim sw_scans detected!\n"); - goto out; - } - - printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n"); - - memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN); - hwsim->scanning = true; - memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); - -out: - mutex_unlock(&hwsim->mutex); -} - -static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - - mutex_lock(&hwsim->mutex); - - printk(KERN_DEBUG "hwsim sw_scan_complete\n"); - hwsim->scanning = false; - eth_zero_addr(hwsim->scan_addr); - - mutex_unlock(&hwsim->mutex); -} - -static void hw_roc_start(struct work_struct *work) -{ - struct mac80211_hwsim_data *hwsim = - container_of(work, struct mac80211_hwsim_data, roc_start.work); - - mutex_lock(&hwsim->mutex); - - wiphy_debug(hwsim->hw->wiphy, "hwsim ROC begins\n"); - hwsim->tmp_chan = hwsim->roc_chan; - 
ieee80211_ready_on_channel(hwsim->hw); - - ieee80211_queue_delayed_work(hwsim->hw, &hwsim->roc_done, - msecs_to_jiffies(hwsim->roc_duration)); - - mutex_unlock(&hwsim->mutex); -} - -static void hw_roc_done(struct work_struct *work) -{ - struct mac80211_hwsim_data *hwsim = - container_of(work, struct mac80211_hwsim_data, roc_done.work); - - mutex_lock(&hwsim->mutex); - ieee80211_remain_on_channel_expired(hwsim->hw); - hwsim->tmp_chan = NULL; - mutex_unlock(&hwsim->mutex); - - wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n"); -} - -static int mac80211_hwsim_roc(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel *chan, - int duration, - enum ieee80211_roc_type type) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - - mutex_lock(&hwsim->mutex); - if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { - mutex_unlock(&hwsim->mutex); - return -EBUSY; - } - - hwsim->roc_chan = chan; - hwsim->roc_duration = duration; - mutex_unlock(&hwsim->mutex); - - wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n", - chan->center_freq, duration); - ieee80211_queue_delayed_work(hw, &hwsim->roc_start, HZ/50); - - return 0; -} - -static int mac80211_hwsim_croc(struct ieee80211_hw *hw) -{ - struct mac80211_hwsim_data *hwsim = hw->priv; - - cancel_delayed_work_sync(&hwsim->roc_start); - cancel_delayed_work_sync(&hwsim->roc_done); - - mutex_lock(&hwsim->mutex); - hwsim->tmp_chan = NULL; - mutex_unlock(&hwsim->mutex); - - wiphy_debug(hw->wiphy, "hwsim ROC canceled\n"); - - return 0; -} - -static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) -{ - hwsim_set_chanctx_magic(ctx); - wiphy_debug(hw->wiphy, - "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", - ctx->def.chan->center_freq, ctx->def.width, - ctx->def.center_freq1, ctx->def.center_freq2); - return 0; -} - -static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) -{ - wiphy_debug(hw->wiphy, - "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", - ctx->def.chan->center_freq, ctx->def.width, - ctx->def.center_freq1, ctx->def.center_freq2); - hwsim_check_chanctx_magic(ctx); - hwsim_clear_chanctx_magic(ctx); -} - -static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - u32 changed) -{ - hwsim_check_chanctx_magic(ctx); - wiphy_debug(hw->wiphy, - "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", - ctx->def.chan->center_freq, ctx->def.width, - ctx->def.center_freq1, ctx->def.center_freq2); -} - -static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx) -{ - hwsim_check_magic(vif); - hwsim_check_chanctx_magic(ctx); - - return 0; -} - -static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx) -{ - hwsim_check_magic(vif); - hwsim_check_chanctx_magic(ctx); -} - -static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = { - "tx_pkts_nic", - "tx_bytes_nic", - "rx_pkts_nic", - "rx_bytes_nic", - "d_tx_dropped", - "d_tx_failed", - "d_ps_mode", - "d_group", -}; - -#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats) - -static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - u32 sset, u8 *data) -{ - if (sset == ETH_SS_STATS) - memcpy(data, *mac80211_hwsim_gstrings_stats, - 
sizeof(mac80211_hwsim_gstrings_stats)); -} - -static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int sset) -{ - if (sset == ETH_SS_STATS) - return MAC80211_HWSIM_SSTATS_LEN; - return 0; -} - -static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ethtool_stats *stats, u64 *data) -{ - struct mac80211_hwsim_data *ar = hw->priv; - int i = 0; - - data[i++] = ar->tx_pkts; - data[i++] = ar->tx_bytes; - data[i++] = ar->rx_pkts; - data[i++] = ar->rx_bytes; - data[i++] = ar->tx_dropped; - data[i++] = ar->tx_failed; - data[i++] = ar->ps; - data[i++] = ar->group; - - WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); -} - -#define HWSIM_COMMON_OPS \ - .tx = mac80211_hwsim_tx, \ - .start = mac80211_hwsim_start, \ - .stop = mac80211_hwsim_stop, \ - .add_interface = mac80211_hwsim_add_interface, \ - .change_interface = mac80211_hwsim_change_interface, \ - .remove_interface = mac80211_hwsim_remove_interface, \ - .config = mac80211_hwsim_config, \ - .configure_filter = mac80211_hwsim_configure_filter, \ - .bss_info_changed = mac80211_hwsim_bss_info_changed, \ - .sta_add = mac80211_hwsim_sta_add, \ - .sta_remove = mac80211_hwsim_sta_remove, \ - .sta_notify = mac80211_hwsim_sta_notify, \ - .set_tim = mac80211_hwsim_set_tim, \ - .conf_tx = mac80211_hwsim_conf_tx, \ - .get_survey = mac80211_hwsim_get_survey, \ - CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \ - .ampdu_action = mac80211_hwsim_ampdu_action, \ - .flush = mac80211_hwsim_flush, \ - .get_tsf = mac80211_hwsim_get_tsf, \ - .set_tsf = mac80211_hwsim_set_tsf, \ - .get_et_sset_count = mac80211_hwsim_get_et_sset_count, \ - .get_et_stats = mac80211_hwsim_get_et_stats, \ - .get_et_strings = mac80211_hwsim_get_et_strings, - -static const struct ieee80211_ops mac80211_hwsim_ops = { - HWSIM_COMMON_OPS - .sw_scan_start = mac80211_hwsim_sw_scan, - .sw_scan_complete = mac80211_hwsim_sw_scan_complete, -}; - -static const struct ieee80211_ops mac80211_hwsim_mchan_ops = { - HWSIM_COMMON_OPS - .hw_scan = mac80211_hwsim_hw_scan, - .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, - .sw_scan_start = NULL, - .sw_scan_complete = NULL, - .remain_on_channel = mac80211_hwsim_roc, - .cancel_remain_on_channel = mac80211_hwsim_croc, - .add_chanctx = mac80211_hwsim_add_chanctx, - .remove_chanctx = mac80211_hwsim_remove_chanctx, - .change_chanctx = mac80211_hwsim_change_chanctx, - .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx, - .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, -}; - -struct hwsim_new_radio_params { - unsigned int channels; - const char *reg_alpha2; - const struct ieee80211_regdomain *regd; - bool reg_strict; - bool p2p_device; - bool use_chanctx; - bool destroy_on_close; - const char *hwname; - bool no_vif; -}; - -static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, - struct genl_info *info) -{ - if (info) - genl_notify(&hwsim_genl_family, mcast_skb, info, - HWSIM_MCGRP_CONFIG, GFP_KERNEL); - else - genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, - HWSIM_MCGRP_CONFIG, GFP_KERNEL); -} - -static int append_radio_msg(struct sk_buff *skb, int id, - struct hwsim_new_radio_params *param) -{ - int ret; - - ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); - if (ret < 0) - return ret; - - if (param->channels) { - ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels); - if (ret < 0) - return ret; - } - - if (param->reg_alpha2) { - ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2, - param->reg_alpha2); - if (ret < 0) - return ret; - } - - if 
(param->regd) { - int i; - - for (i = 0; i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) { - if (hwsim_world_regdom_custom[i] != param->regd) - continue; - - ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i); - if (ret < 0) - return ret; - break; - } - } - - if (param->reg_strict) { - ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG); - if (ret < 0) - return ret; - } - - if (param->p2p_device) { - ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE); - if (ret < 0) - return ret; - } - - if (param->use_chanctx) { - ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX); - if (ret < 0) - return ret; - } - - if (param->hwname) { - ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, - strlen(param->hwname), param->hwname); - if (ret < 0) - return ret; - } - - return 0; -} - -static void hwsim_mcast_new_radio(int id, struct genl_info *info, - struct hwsim_new_radio_params *param) -{ - struct sk_buff *mcast_skb; - void *data; - - mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!mcast_skb) - return; - - data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, - HWSIM_CMD_NEW_RADIO); - if (!data) - goto out_err; - - if (append_radio_msg(mcast_skb, id, param) < 0) - goto out_err; - - genlmsg_end(mcast_skb, data); - - hwsim_mcast_config_msg(mcast_skb, info); - return; - -out_err: - genlmsg_cancel(mcast_skb, data); - nlmsg_free(mcast_skb); -} - -static int mac80211_hwsim_new_radio(struct genl_info *info, - struct hwsim_new_radio_params *param) -{ - int err; - u8 addr[ETH_ALEN]; - struct mac80211_hwsim_data *data; - struct ieee80211_hw *hw; - enum nl80211_band band; - const struct ieee80211_ops *ops = &mac80211_hwsim_ops; - struct net *net; - int idx; - - if (WARN_ON(param->channels > 1 && !param->use_chanctx)) - return -EINVAL; - - spin_lock_bh(&hwsim_radio_lock); - idx = hwsim_radio_idx++; - spin_unlock_bh(&hwsim_radio_lock); - - if (param->use_chanctx) - ops = &mac80211_hwsim_mchan_ops; - hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname); - if (!hw) { - printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n"); - err = -ENOMEM; - goto failed; - } - - /* ieee80211_alloc_hw_nm may have used a default name */ - param->hwname = wiphy_name(hw->wiphy); - - if (info) - net = genl_info_net(info); - else - net = &init_net; - wiphy_net_set(hw->wiphy, net); - - data = hw->priv; - data->hw = hw; - - data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx); - if (IS_ERR(data->dev)) { - printk(KERN_DEBUG - "mac80211_hwsim: device_create failed (%ld)\n", - PTR_ERR(data->dev)); - err = -ENOMEM; - goto failed_drvdata; - } - data->dev->driver = &mac80211_hwsim_driver.driver; - err = device_bind_driver(data->dev); - if (err != 0) { - printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", - err); - goto failed_bind; - } - - skb_queue_head_init(&data->pending); - - SET_IEEE80211_DEV(hw, data->dev); - eth_zero_addr(addr); - addr[0] = 0x02; - addr[3] = idx >> 8; - addr[4] = idx; - memcpy(data->addresses[0].addr, addr, ETH_ALEN); - memcpy(data->addresses[1].addr, addr, ETH_ALEN); - data->addresses[1].addr[0] |= 0x40; - hw->wiphy->n_addresses = 2; - hw->wiphy->addresses = data->addresses; - - data->channels = param->channels; - data->use_chanctx = param->use_chanctx; - data->idx = idx; - data->destroy_on_close = param->destroy_on_close; - if (info) - data->portid = info->snd_portid; - - if (data->use_chanctx) { - hw->wiphy->max_scan_ssids = 255; - hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; - hw->wiphy->max_remain_on_channel_duration = 1000; - 
hw->wiphy->iface_combinations = &data->if_combination; - if (param->p2p_device) - data->if_combination = hwsim_if_comb_p2p_dev[0]; - else - data->if_combination = hwsim_if_comb[0]; - hw->wiphy->n_iface_combinations = 1; - /* For channels > 1 DFS is not allowed */ - data->if_combination.radar_detect_widths = 0; - data->if_combination.num_different_channels = data->channels; - } else if (param->p2p_device) { - hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(hwsim_if_comb_p2p_dev); - } else { - hw->wiphy->iface_combinations = hwsim_if_comb; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb); - } - - INIT_DELAYED_WORK(&data->roc_start, hw_roc_start); - INIT_DELAYED_WORK(&data->roc_done, hw_roc_done); - INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work); - - hw->queues = 5; - hw->offchannel_tx_hw_queue = 4; - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_MESH_POINT); - - if (param->p2p_device) - hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE); - - ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); - ieee80211_hw_set(hw, CHANCTX_STA_CSA); - ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); - ieee80211_hw_set(hw, QUEUE_CONTROL); - ieee80211_hw_set(hw, WANT_MONITOR_VIF); - ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, MFP_CAPABLE); - ieee80211_hw_set(hw, SIGNAL_DBM); - ieee80211_hw_set(hw, TDLS_WIDER_BW); - if (rctbl) - ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); - - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | - WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | - WIPHY_FLAG_AP_UAPSD | - WIPHY_FLAG_HAS_CHANNEL_SWITCH; - hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | - NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | - NL80211_FEATURE_STATIC_SMPS | - NL80211_FEATURE_DYNAMIC_SMPS | - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; - wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); - - /* ask mac80211 to reserve space for magic */ - hw->vif_data_size = sizeof(struct hwsim_vif_priv); - hw->sta_data_size = sizeof(struct hwsim_sta_priv); - hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv); - - memcpy(data->channels_2ghz, hwsim_channels_2ghz, - sizeof(hwsim_channels_2ghz)); - memcpy(data->channels_5ghz, hwsim_channels_5ghz, - sizeof(hwsim_channels_5ghz)); - memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); - - for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { - struct ieee80211_supported_band *sband = &data->bands[band]; - switch (band) { - case NL80211_BAND_2GHZ: - sband->channels = data->channels_2ghz; - sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz); - sband->bitrates = data->rates; - sband->n_bitrates = ARRAY_SIZE(hwsim_rates); - break; - case NL80211_BAND_5GHZ: - sband->channels = data->channels_5ghz; - sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz); - sband->bitrates = data->rates + 4; - sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; - - sband->vht_cap.vht_supported = true; - sband->vht_cap.cap = - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ | - IEEE80211_VHT_CAP_RXLDPC | - IEEE80211_VHT_CAP_SHORT_GI_80 | - IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_TXSTBC | - IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_RXSTBC_2 | - IEEE80211_VHT_CAP_RXSTBC_3 | - IEEE80211_VHT_CAP_RXSTBC_4 | - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - sband->vht_cap.vht_mcs.rx_mcs_map = - 
cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 14); - sband->vht_cap.vht_mcs.tx_mcs_map = - sband->vht_cap.vht_mcs.rx_mcs_map; - break; - default: - continue; - } - - sband->ht_cap.ht_supported = true; - sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | - IEEE80211_HT_CAP_GRN_FLD | - IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_SGI_40 | - IEEE80211_HT_CAP_DSSSCCK40; - sband->ht_cap.ampdu_factor = 0x3; - sband->ht_cap.ampdu_density = 0x6; - memset(&sband->ht_cap.mcs, 0, - sizeof(sband->ht_cap.mcs)); - sband->ht_cap.mcs.rx_mask[0] = 0xff; - sband->ht_cap.mcs.rx_mask[1] = 0xff; - sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - - hw->wiphy->bands[band] = sband; - } - - /* By default all radios belong to the first group */ - data->group = 1; - mutex_init(&data->mutex); - - data->netgroup = hwsim_net_get_netgroup(net); - - /* Enable frame retransmissions for lossy channels */ - hw->max_rates = 4; - hw->max_rate_tries = 11; - - hw->wiphy->vendor_commands = mac80211_hwsim_vendor_commands; - hw->wiphy->n_vendor_commands = - ARRAY_SIZE(mac80211_hwsim_vendor_commands); - hw->wiphy->vendor_events = mac80211_hwsim_vendor_events; - hw->wiphy->n_vendor_events = ARRAY_SIZE(mac80211_hwsim_vendor_events); - - if (param->reg_strict) - hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG; - if (param->regd) { - data->regd = param->regd; - hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; - wiphy_apply_custom_regulatory(hw->wiphy, param->regd); - /* give the regulatory workqueue a chance to run */ - schedule_timeout_interruptible(1); - } - - if (param->no_vif) - ieee80211_hw_set(hw, NO_AUTO_VIF); - - wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); - - err = ieee80211_register_hw(hw); - if (err < 0) { - printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n", - err); - goto failed_hw; - } - - wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr); - - if (param->reg_alpha2) { - data->alpha2[0] = param->reg_alpha2[0]; - data->alpha2[1] = param->reg_alpha2[1]; - regulatory_hint(hw->wiphy, param->reg_alpha2); - } - - data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir); - debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps); - debugfs_create_file("group", 0666, data->debugfs, data, - &hwsim_fops_group); - if (!data->use_chanctx) - debugfs_create_file("dfs_simulate_radar", 0222, - data->debugfs, - data, &hwsim_simulate_radar); - - tasklet_hrtimer_init(&data->beacon_timer, - mac80211_hwsim_beacon, - CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - - spin_lock_bh(&hwsim_radio_lock); - list_add_tail(&data->list, &hwsim_radios); - spin_unlock_bh(&hwsim_radio_lock); - - if (idx > 0) - hwsim_mcast_new_radio(idx, info, param); - - return idx; - -failed_hw: - device_release_driver(data->dev); -failed_bind: - device_unregister(data->dev); -failed_drvdata: - ieee80211_free_hw(hw); -failed: - return err; -} - -static void hwsim_mcast_del_radio(int id, const char *hwname, - struct genl_info *info) -{ - struct sk_buff *skb; - void *data; - int ret; - - skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!skb) - return; - - data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, - HWSIM_CMD_DEL_RADIO); - if (!data) - goto error; - - ret = nla_put_u32(skb, 
HWSIM_ATTR_RADIO_ID, id);
- if (ret < 0)
- goto error;
-
- ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname),
- hwname);
- if (ret < 0)
- goto error;
-
- genlmsg_end(skb, data);
-
- hwsim_mcast_config_msg(skb, info);
-
- return;
-
-error:
- nlmsg_free(skb);
-}
-
-static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data,
- const char *hwname,
- struct genl_info *info)
-{
- hwsim_mcast_del_radio(data->idx, hwname, info);
- debugfs_remove_recursive(data->debugfs);
- ieee80211_unregister_hw(data->hw);
- device_release_driver(data->dev);
- device_unregister(data->dev);
- ieee80211_free_hw(data->hw);
-}
-
-static int mac80211_hwsim_get_radio(struct sk_buff *skb,
- struct mac80211_hwsim_data *data,
- u32 portid, u32 seq,
- struct netlink_callback *cb, int flags)
-{
- void *hdr;
- struct hwsim_new_radio_params param = { };
- int res = -EMSGSIZE;
-
- hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
- HWSIM_CMD_GET_RADIO);
- if (!hdr)
- return -EMSGSIZE;
-
- if (cb)
- genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
-
- if (data->alpha2[0] && data->alpha2[1])
- param.reg_alpha2 = data->alpha2;
-
- param.reg_strict = !!(data->hw->wiphy->regulatory_flags &
- REGULATORY_STRICT_REG);
- param.p2p_device = !!(data->hw->wiphy->interface_modes &
- BIT(NL80211_IFTYPE_P2P_DEVICE));
- param.use_chanctx = data->use_chanctx;
- param.regd = data->regd;
- param.channels = data->channels;
- param.hwname = wiphy_name(data->hw->wiphy);
-
- res = append_radio_msg(skb, data->idx, &param);
- if (res < 0)
- goto out_err;
-
- genlmsg_end(skb, hdr);
- return 0;
-
-out_err:
- genlmsg_cancel(skb, hdr);
- return res;
-}
-
-static void mac80211_hwsim_free(void)
-{
- struct mac80211_hwsim_data *data;
-
- spin_lock_bh(&hwsim_radio_lock);
- while ((data = list_first_entry_or_null(&hwsim_radios,
- struct mac80211_hwsim_data,
- list))) {
- list_del(&data->list);
- spin_unlock_bh(&hwsim_radio_lock);
- mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
- NULL);
- spin_lock_bh(&hwsim_radio_lock);
- }
- spin_unlock_bh(&hwsim_radio_lock);
- class_destroy(hwsim_class);
-}
-
-static const struct net_device_ops hwsim_netdev_ops = {
- .ndo_start_xmit = hwsim_mon_xmit,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void hwsim_mon_setup(struct net_device *dev)
-{
- dev->netdev_ops = &hwsim_netdev_ops;
- dev->needs_free_netdev = true;
- ether_setup(dev);
- dev->priv_flags |= IFF_NO_QUEUE;
- dev->type = ARPHRD_IEEE80211_RADIOTAP;
- eth_zero_addr(dev->dev_addr);
- dev->dev_addr[0] = 0x12;
-}
-
-static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
-{
- struct mac80211_hwsim_data *data;
- bool _found = false;
-
- spin_lock_bh(&hwsim_radio_lock);
- list_for_each_entry(data, &hwsim_radios, list) {
- if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
- _found = true;
- break;
- }
- }
- spin_unlock_bh(&hwsim_radio_lock);
-
- if (!_found)
- return NULL;
-
- return data;
-}
-
-static void hwsim_register_wmediumd(struct net *net, u32 portid)
-{
- struct mac80211_hwsim_data *data;
-
- hwsim_net_set_wmediumd(net, portid);
-
- spin_lock_bh(&hwsim_radio_lock);
- list_for_each_entry(data, &hwsim_radios, list) {
- if (data->netgroup == hwsim_net_get_netgroup(net))
- data->wmediumd = portid;
- }
- spin_unlock_bh(&hwsim_radio_lock);
-}
-
-static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
- struct genl_info *info)
-{
-
- struct ieee80211_hdr *hdr;
- struct mac80211_hwsim_data *data2;
- struct ieee80211_tx_info *txi;
- 
struct hwsim_tx_rate *tx_attempts; - u64 ret_skb_cookie; - struct sk_buff *skb, *tmp; - const u8 *src; - unsigned int hwsim_flags; - int i; - bool found = false; - - if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || - !info->attrs[HWSIM_ATTR_FLAGS] || - !info->attrs[HWSIM_ATTR_COOKIE] || - !info->attrs[HWSIM_ATTR_SIGNAL] || - !info->attrs[HWSIM_ATTR_TX_INFO]) - goto out; - - src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); - hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); - ret_skb_cookie = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]); - - data2 = get_hwsim_data_ref_from_addr(src); - if (!data2) - goto out; - - if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) - goto out; - - if (info->snd_portid != data2->wmediumd) - goto out; - - /* look for the skb matching the cookie passed back from user */ - skb_queue_walk_safe(&data2->pending, skb, tmp) { - u64 skb_cookie; - - txi = IEEE80211_SKB_CB(skb); - skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0]; - - if (skb_cookie == ret_skb_cookie) { - skb_unlink(skb, &data2->pending); - found = true; - break; - } - } - - /* not found */ - if (!found) - goto out; - - /* Tx info received because the frame was broadcasted on user space, - so we get all the necessary info: tx attempts and skb control buff */ - - tx_attempts = (struct hwsim_tx_rate *)nla_data( - info->attrs[HWSIM_ATTR_TX_INFO]); - - /* now send back TX status */ - txi = IEEE80211_SKB_CB(skb); - - ieee80211_tx_info_clear_status(txi); - - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - txi->status.rates[i].idx = tx_attempts[i].idx; - txi->status.rates[i].count = tx_attempts[i].count; - /*txi->status.rates[i].flags = 0;*/ - } - - txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); - - if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) && - (hwsim_flags & HWSIM_TX_STAT_ACK)) { - if (skb->len >= 16) { - hdr = (struct ieee80211_hdr *) skb->data; - mac80211_hwsim_monitor_ack(data2->channel, - hdr->addr2); - } - txi->flags |= IEEE80211_TX_STAT_ACK; - } - ieee80211_tx_status_irqsafe(data2->hw, skb); - return 0; -out: - return -EINVAL; - -} - -static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, - struct genl_info *info) -{ - struct mac80211_hwsim_data *data2; - struct ieee80211_rx_status rx_status; - const u8 *dst; - int frame_data_len; - void *frame_data; - struct sk_buff *skb = NULL; - - if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || - !info->attrs[HWSIM_ATTR_FRAME] || - !info->attrs[HWSIM_ATTR_RX_RATE] || - !info->attrs[HWSIM_ATTR_SIGNAL]) - goto out; - - dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]); - frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); - frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); - - /* Allocate new skb here */ - skb = alloc_skb(frame_data_len, GFP_KERNEL); - if (skb == NULL) - goto err; - - if (frame_data_len > IEEE80211_MAX_DATA_LEN) - goto err; - - /* Copy the data */ - skb_put_data(skb, frame_data, frame_data_len); - - data2 = get_hwsim_data_ref_from_addr(dst); - if (!data2) - goto out; - - if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) - goto out; - - if (info->snd_portid != data2->wmediumd) - goto out; - - /* check if radio is configured properly */ - - if (data2->idle || !data2->started) - goto out; - - /* A frame is received from user space */ - memset(&rx_status, 0, sizeof(rx_status)); - if (info->attrs[HWSIM_ATTR_FREQ]) { - /* throw away off-channel packets, but allow both the temporary - * ("hw" scan/remain-on-channel) and regular channel, 
since the
- * internal datapath also allows this
- */
- mutex_lock(&data2->mutex);
- rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
-
- if (rx_status.freq != data2->channel->center_freq &&
- (!data2->tmp_chan ||
- rx_status.freq != data2->tmp_chan->center_freq)) {
- mutex_unlock(&data2->mutex);
- goto out;
- }
- mutex_unlock(&data2->mutex);
- } else {
- rx_status.freq = data2->channel->center_freq;
- }
-
- rx_status.band = data2->channel->band;
- rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
- rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
-
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
- data2->rx_pkts++;
- data2->rx_bytes += skb->len;
- ieee80211_rx_irqsafe(data2->hw, skb);
-
- return 0;
-err:
- printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
-out:
- dev_kfree_skb(skb);
- return -EINVAL;
-}
-
-static int hwsim_register_received_nl(struct sk_buff *skb_2,
- struct genl_info *info)
-{
- struct net *net = genl_info_net(info);
- struct mac80211_hwsim_data *data;
- int chans = 1;
-
- spin_lock_bh(&hwsim_radio_lock);
- list_for_each_entry(data, &hwsim_radios, list)
- chans = max(chans, data->channels);
- spin_unlock_bh(&hwsim_radio_lock);
-
- /* In the future we should revise the userspace API and allow it
- * to set a flag that it does support multi-channel, then we can
- * let this pass conditionally on the flag.
- * For current userspace, prohibit it since it won't work right.
- */
- if (chans > 1)
- return -EOPNOTSUPP;
-
- if (hwsim_net_get_wmediumd(net))
- return -EBUSY;
-
- hwsim_register_wmediumd(net, info->snd_portid);
-
- printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
- "switching to wmediumd mode with pid %d\n", info->snd_portid);
-
- return 0;
-}
-
-static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
-{
- struct hwsim_new_radio_params param = { 0 };
- const char *hwname = NULL;
-
- param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
- param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
- param.channels = channels;
- param.destroy_on_close =
- info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
-
- if (info->attrs[HWSIM_ATTR_CHANNELS])
- param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
-
- if (info->attrs[HWSIM_ATTR_NO_VIF])
- param.no_vif = true;
-
- if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
- if (!hwname)
- return -ENOMEM;
- param.hwname = hwname;
- }
-
- if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
- param.use_chanctx = true;
- else
- param.use_chanctx = (param.channels > 1);
-
- if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
- param.reg_alpha2 =
- nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
-
- if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
- u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
-
- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
- return -EINVAL;
- param.regd = hwsim_world_regdom_custom[idx];
- }
-
- return mac80211_hwsim_new_radio(info, &param);
-}
-
-static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
-{
- struct mac80211_hwsim_data *data;
- s64 idx = -1;
- const char *hwname = NULL;
-
- if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
- idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
- } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char 
*)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); - if (!hwname) - return -ENOMEM; - } else - return -EINVAL; - - spin_lock_bh(&hwsim_radio_lock); - list_for_each_entry(data, &hwsim_radios, list) { - if (idx >= 0) { - if (data->idx != idx) - continue; - } else { - if (!hwname || - strcmp(hwname, wiphy_name(data->hw->wiphy))) - continue; - } - - if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) - continue; - - list_del(&data->list); - spin_unlock_bh(&hwsim_radio_lock); - mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), - info); - kfree(hwname); - return 0; - } - spin_unlock_bh(&hwsim_radio_lock); - - kfree(hwname); - return -ENODEV; -} - -static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) -{ - struct mac80211_hwsim_data *data; - struct sk_buff *skb; - int idx, res = -ENODEV; - - if (!info->attrs[HWSIM_ATTR_RADIO_ID]) - return -EINVAL; - idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); - - spin_lock_bh(&hwsim_radio_lock); - list_for_each_entry(data, &hwsim_radios, list) { - if (data->idx != idx) - continue; - - if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) - continue; - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!skb) { - res = -ENOMEM; - goto out_err; - } - - res = mac80211_hwsim_get_radio(skb, data, info->snd_portid, - info->snd_seq, NULL, 0); - if (res < 0) { - nlmsg_free(skb); - goto out_err; - } - - genlmsg_reply(skb, info); - break; - } - -out_err: - spin_unlock_bh(&hwsim_radio_lock); - - return res; -} - -static int hwsim_dump_radio_nl(struct sk_buff *skb, - struct netlink_callback *cb) -{ - int idx = cb->args[0]; - struct mac80211_hwsim_data *data = NULL; - int res; - - spin_lock_bh(&hwsim_radio_lock); - - if (idx == hwsim_radio_idx) - goto done; - - list_for_each_entry(data, &hwsim_radios, list) { - if (data->idx < idx) - continue; - - if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk))) - continue; - - res = mac80211_hwsim_get_radio(skb, data, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, cb, - NLM_F_MULTI); - if (res < 0) - break; - - idx = data->idx + 1; - } - - cb->args[0] = idx; - -done: - spin_unlock_bh(&hwsim_radio_lock); - return skb->len; -} - -/* Generic Netlink operations array */ -static const struct genl_ops hwsim_ops[] = { - { - .cmd = HWSIM_CMD_REGISTER, - .policy = hwsim_genl_policy, - .doit = hwsim_register_received_nl, - .flags = GENL_UNS_ADMIN_PERM, - }, - { - .cmd = HWSIM_CMD_FRAME, - .policy = hwsim_genl_policy, - .doit = hwsim_cloned_frame_received_nl, - }, - { - .cmd = HWSIM_CMD_TX_INFO_FRAME, - .policy = hwsim_genl_policy, - .doit = hwsim_tx_info_frame_received_nl, - }, - { - .cmd = HWSIM_CMD_NEW_RADIO, - .policy = hwsim_genl_policy, - .doit = hwsim_new_radio_nl, - .flags = GENL_UNS_ADMIN_PERM, - }, - { - .cmd = HWSIM_CMD_DEL_RADIO, - .policy = hwsim_genl_policy, - .doit = hwsim_del_radio_nl, - .flags = GENL_UNS_ADMIN_PERM, - }, - { - .cmd = HWSIM_CMD_GET_RADIO, - .policy = hwsim_genl_policy, - .doit = hwsim_get_radio_nl, - .dumpit = hwsim_dump_radio_nl, - }, -}; - -static struct genl_family hwsim_genl_family __ro_after_init = { - .name = "MAC80211_HWSIM", - .version = 1, - .maxattr = HWSIM_ATTR_MAX, - .netnsok = true, - .module = THIS_MODULE, - .ops = hwsim_ops, - .n_ops = ARRAY_SIZE(hwsim_ops), - .mcgrps = hwsim_mcgrps, - .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), -}; - -static void destroy_radio(struct work_struct *work) -{ - struct mac80211_hwsim_data *data = - container_of(work, struct mac80211_hwsim_data, destroy_work); - - mac80211_hwsim_del_radio(data, 
wiphy_name(data->hw->wiphy), NULL); -} - -static void remove_user_radios(u32 portid) -{ - struct mac80211_hwsim_data *entry, *tmp; - - spin_lock_bh(&hwsim_radio_lock); - list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { - if (entry->destroy_on_close && entry->portid == portid) { - list_del(&entry->list); - INIT_WORK(&entry->destroy_work, destroy_radio); - schedule_work(&entry->destroy_work); - } - } - spin_unlock_bh(&hwsim_radio_lock); -} - -static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, - unsigned long state, - void *_notify) -{ - struct netlink_notify *notify = _notify; - - if (state != NETLINK_URELEASE) - return NOTIFY_DONE; - - remove_user_radios(notify->portid); - - if (notify->portid == hwsim_net_get_wmediumd(notify->net)) { - printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" - " socket, switching to perfect channel medium\n"); - hwsim_register_wmediumd(notify->net, 0); - } - return NOTIFY_DONE; - -} - -static struct notifier_block hwsim_netlink_notifier = { - .notifier_call = mac80211_hwsim_netlink_notify, -}; - -static int __init hwsim_init_netlink(void) -{ - int rc; - - printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); - - rc = genl_register_family(&hwsim_genl_family); - if (rc) - goto failure; - - rc = netlink_register_notifier(&hwsim_netlink_notifier); - if (rc) { - genl_unregister_family(&hwsim_genl_family); - goto failure; - } - - return 0; - -failure: - printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); - return -EINVAL; -} - -static __net_init int hwsim_init_net(struct net *net) -{ - hwsim_net_set_netgroup(net); - - return 0; -} - -static void __net_exit hwsim_exit_net(struct net *net) -{ - struct mac80211_hwsim_data *data, *tmp; - - spin_lock_bh(&hwsim_radio_lock); - list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { - if (!net_eq(wiphy_net(data->hw->wiphy), net)) - continue; - - /* Radios created in init_net are returned to init_net. 
*/
- if (data->netgroup == hwsim_net_get_netgroup(&init_net))
- continue;
-
- list_del(&data->list);
- INIT_WORK(&data->destroy_work, destroy_radio);
- schedule_work(&data->destroy_work);
- }
- spin_unlock_bh(&hwsim_radio_lock);
-}
-
-static struct pernet_operations hwsim_net_ops = {
- .init = hwsim_init_net,
- .exit = hwsim_exit_net,
- .id = &hwsim_net_id,
- .size = sizeof(struct hwsim_net),
-};
-
-static void hwsim_exit_netlink(void)
-{
- /* unregister the notifier */
- netlink_unregister_notifier(&hwsim_netlink_notifier);
- /* unregister the family */
- genl_unregister_family(&hwsim_genl_family);
-}
-
-static int __init init_mac80211_hwsim(void)
-{
- int i, err;
-
- if (radios < 0 || radios > 100)
- return -EINVAL;
-
- if (channels < 1)
- return -EINVAL;
-
- spin_lock_init(&hwsim_radio_lock);
-
- err = register_pernet_device(&hwsim_net_ops);
- if (err)
- return err;
-
- err = platform_driver_register(&mac80211_hwsim_driver);
- if (err)
- goto out_unregister_pernet;
-
- hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
- if (IS_ERR(hwsim_class)) {
- err = PTR_ERR(hwsim_class);
- goto out_unregister_driver;
- }
-
- err = hwsim_init_netlink();
- if (err < 0)
- goto out_unregister_driver;
-
- for (i = 0; i < radios; i++) {
- struct hwsim_new_radio_params param = { 0 };
-
- param.channels = channels;
-
- switch (regtest) {
- case HWSIM_REGTEST_DIFF_COUNTRY:
- if (i < ARRAY_SIZE(hwsim_alpha2s))
- param.reg_alpha2 = hwsim_alpha2s[i];
- break;
- case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
- if (!i)
- param.reg_alpha2 = hwsim_alpha2s[0];
- break;
- case HWSIM_REGTEST_STRICT_ALL:
- param.reg_strict = true;
- case HWSIM_REGTEST_DRIVER_REG_ALL:
- param.reg_alpha2 = hwsim_alpha2s[0];
- break;
- case HWSIM_REGTEST_WORLD_ROAM:
- if (i == 0)
- param.regd = &hwsim_world_regdom_custom_01;
- break;
- case HWSIM_REGTEST_CUSTOM_WORLD:
- param.regd = &hwsim_world_regdom_custom_01;
- break;
- case HWSIM_REGTEST_CUSTOM_WORLD_2:
- if (i == 0)
- param.regd = &hwsim_world_regdom_custom_01;
- else if (i == 1)
- param.regd = &hwsim_world_regdom_custom_02;
- break;
- case HWSIM_REGTEST_STRICT_FOLLOW:
- if (i == 0) {
- param.reg_strict = true;
- param.reg_alpha2 = hwsim_alpha2s[0];
- }
- break;
- case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
- if (i == 0) {
- param.reg_strict = true;
- param.reg_alpha2 = hwsim_alpha2s[0];
- } else if (i == 1) {
- param.reg_alpha2 = hwsim_alpha2s[1];
- }
- break;
- case HWSIM_REGTEST_ALL:
- switch (i) {
- case 0:
- param.regd = &hwsim_world_regdom_custom_01;
- break;
- case 1:
- param.regd = &hwsim_world_regdom_custom_02;
- break;
- case 2:
- param.reg_alpha2 = hwsim_alpha2s[0];
- break;
- case 3:
- param.reg_alpha2 = hwsim_alpha2s[1];
- break;
- case 4:
- param.reg_strict = true;
- param.reg_alpha2 = hwsim_alpha2s[2];
- break;
- }
- break;
- default:
- break;
- }
-
- param.p2p_device = support_p2p_device;
- param.use_chanctx = channels > 1;
-
- err = mac80211_hwsim_new_radio(NULL, &param);
- if (err < 0)
- goto out_free_radios;
- }
-
- hwsim_mon = alloc_netdev(0, "hwsim%d", NET_NAME_UNKNOWN,
- hwsim_mon_setup);
- if (hwsim_mon == NULL) {
- err = -ENOMEM;
- goto out_free_radios;
- }
-
- rtnl_lock();
- err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
- if (err < 0) {
- rtnl_unlock();
- goto out_free_radios;
- }
-
- err = register_netdevice(hwsim_mon);
- if (err < 0) {
- rtnl_unlock();
- goto out_free_mon;
- }
- rtnl_unlock();
-
- return 0;
-
-out_free_mon:
- free_netdev(hwsim_mon);
-out_free_radios:
- mac80211_hwsim_free();
-out_unregister_driver:
- 
platform_driver_unregister(&mac80211_hwsim_driver); -out_unregister_pernet: - unregister_pernet_device(&hwsim_net_ops); - return err; -} -module_init(init_mac80211_hwsim); - -static void __exit exit_mac80211_hwsim(void) -{ - printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); - - hwsim_exit_netlink(); - - mac80211_hwsim_free(); - unregister_netdev(hwsim_mon); - platform_driver_unregister(&mac80211_hwsim_driver); - unregister_pernet_device(&hwsim_net_ops); -} -module_exit(exit_mac80211_hwsim); diff --git a/mac80211_hwsim/mac80211_hwsim.h b/mac80211_hwsim/mac80211_hwsim.h deleted file mode 100644 index 3f5eda5..0000000 --- a/mac80211_hwsim/mac80211_hwsim.h +++ /dev/null @@ -1,174 +0,0 @@ -/* - * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 - * Copyright (c) 2008, Jouni Malinen - * Copyright (c) 2011, Javier Lopez - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __MAC80211_HWSIM_H -#define __MAC80211_HWSIM_H - -/** - * enum hwsim_tx_control_flags - flags to describe transmission info/status - * - * These flags are used to give the wmediumd extra information in order to - * modify its behavior for each frame - * - * @HWSIM_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame. - * @HWSIM_TX_CTL_NO_ACK: tell the wmediumd not to wait for an ack - * @HWSIM_TX_STAT_ACK: Frame was acknowledged - * - */ -enum hwsim_tx_control_flags { - HWSIM_TX_CTL_REQ_TX_STATUS = BIT(0), - HWSIM_TX_CTL_NO_ACK = BIT(1), - HWSIM_TX_STAT_ACK = BIT(2), -}; - -/** - * DOC: Frame transmission/registration support - * - * Frame transmission and registration support exists to allow userspace - * entities such as wmediumd to receive and process all broadcasted - * frames from a mac80211_hwsim radio device. - * - * This allow user space applications to decide if the frame should be - * dropped or not and implement a wireless medium simulator at user space. - * - * Registration is done by sending a register message to the driver and - * will be automatically unregistered if the user application doesn't - * responds to sent frames. - * Once registered the user application has to take responsibility of - * broadcasting the frames to all listening mac80211_hwsim radio - * interfaces. - * - * For more technical details, see the corresponding command descriptions - * below. - */ - -/** - * enum hwsim_commands - supported hwsim commands - * - * @HWSIM_CMD_UNSPEC: unspecified command to catch errors - * - * @HWSIM_CMD_REGISTER: request to register and received all broadcasted - * frames by any mac80211_hwsim radio device. 
- * @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user - * space, uses: - * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER, - * %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE, - * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional) - * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to - * kernel, uses: - * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS, - * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE - * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, - * returns the radio ID (>= 0) or negative on errors, if successful - * then multicast the result - * @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted - * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses: - * %HWSIM_ATTR_RADIO_ID - * @__HWSIM_CMD_MAX: enum limit - */ -enum { - HWSIM_CMD_UNSPEC, - HWSIM_CMD_REGISTER, - HWSIM_CMD_FRAME, - HWSIM_CMD_TX_INFO_FRAME, - HWSIM_CMD_NEW_RADIO, - HWSIM_CMD_DEL_RADIO, - HWSIM_CMD_GET_RADIO, - __HWSIM_CMD_MAX, -}; -#define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1) - -#define HWSIM_CMD_CREATE_RADIO HWSIM_CMD_NEW_RADIO -#define HWSIM_CMD_DESTROY_RADIO HWSIM_CMD_DEL_RADIO - -/** - * enum hwsim_attrs - hwsim netlink attributes - * - * @HWSIM_ATTR_UNSPEC: unspecified attribute to catch errors - * - * @HWSIM_ATTR_ADDR_RECEIVER: MAC address of the radio device that - * the frame is broadcasted to - * @HWSIM_ATTR_ADDR_TRANSMITTER: MAC address of the radio device that - * the frame was broadcasted from - * @HWSIM_ATTR_FRAME: Data array - * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process - properly the frame at user space - * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame at user - space - * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame at user - space - * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array - * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame - * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO - * command giving the number of channels supported by the new radio - * @HWSIM_ATTR_RADIO_ID: u32 attribute used with %HWSIM_CMD_DESTROY_RADIO - * only to destroy a radio - * @HWSIM_ATTR_REG_HINT_ALPHA2: alpha2 for regulatoro driver hint - * (nla string, length 2) - * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute) - * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute) - * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag) - * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO - * command to force use of channel contexts even when only a - * single channel is supported - * @HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE: used with the %HWSIM_CMD_CREATE_RADIO - * command to force radio removal when process that created the radio dies - * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 - * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. - * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. 
- * @__HWSIM_ATTR_MAX: enum limit
- */
-
-
-enum {
-	HWSIM_ATTR_UNSPEC,
-	HWSIM_ATTR_ADDR_RECEIVER,
-	HWSIM_ATTR_ADDR_TRANSMITTER,
-	HWSIM_ATTR_FRAME,
-	HWSIM_ATTR_FLAGS,
-	HWSIM_ATTR_RX_RATE,
-	HWSIM_ATTR_SIGNAL,
-	HWSIM_ATTR_TX_INFO,
-	HWSIM_ATTR_COOKIE,
-	HWSIM_ATTR_CHANNELS,
-	HWSIM_ATTR_RADIO_ID,
-	HWSIM_ATTR_REG_HINT_ALPHA2,
-	HWSIM_ATTR_REG_CUSTOM_REG,
-	HWSIM_ATTR_REG_STRICT_REG,
-	HWSIM_ATTR_SUPPORT_P2P_DEVICE,
-	HWSIM_ATTR_USE_CHANCTX,
-	HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE,
-	HWSIM_ATTR_RADIO_NAME,
-	HWSIM_ATTR_NO_VIF,
-	HWSIM_ATTR_FREQ,
-	HWSIM_ATTR_PAD,
-	__HWSIM_ATTR_MAX,
-};
-#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
-
-/**
- * struct hwsim_tx_rate - rate selection/status
- *
- * @idx: rate index to attempt to send with
- * @count: number of tries in this rate before going to the next rate
- *
- * A value of -1 for @idx indicates an invalid rate and, if used
- * in an array of retry rates, that no more rates should be tried.
- *
- * When used for transmit status reporting, the driver should
- * always report the rate and number of retries used.
- *
- */
-struct hwsim_tx_rate {
-	s8 idx;
-	u8 count;
-} __packed;
-
-#endif /* __MAC80211_HWSIM_H */

From c68a6595ca2d6f9f82fbf8e48b8f0de492e61597 Mon Sep 17 00:00:00 2001
From: Jeevaka Prabu Badrappan
Date: Wed, 18 Mar 2020 17:16:37 +0530
Subject: [PATCH 8/8] Update binderfs_device to match kernel 5.4 and above

The binderfs_device structure used by android-entry is updated to
match the definition used by the built-in binderfs module in kernel
5.4 and above, so that android-entry stays compatible with the
in-kernel module.

Tracked-On: OAM-89615
Signed-off-by: Jeevaka Prabu Badrappan
---
 binder/binder_ctl.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/binder/binder_ctl.h b/binder/binder_ctl.h
index 65b2efd..b638098 100644
--- a/binder/binder_ctl.h
+++ b/binder/binder_ctl.h
@@ -22,8 +22,8 @@
  */
 struct binderfs_device {
 	char name[BINDERFS_MAX_NAME + 1];
-	__u8 major;
-	__u8 minor;
+	__u32 major;
+	__u32 minor;
 };
 
 /**
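
For context on why the field widths must match the in-kernel definition:
user space (android-entry here) creates binder devices by passing a
struct binderfs_device to the BINDER_CTL_ADD ioctl on binderfs'
binder-control node, and the structure's size is encoded into that ioctl
request number, so a __u8/__u32 mismatch between the header user space
was built against and the module makes the call fail outright. The sketch
below shows that flow under a few assumptions: binderfs is mounted at
/dev/binderfs, the device name "my-binder" is only an example, and this
repository's binder_ctl.h is taken to define BINDER_CTL_ADD the same way
upstream does.

/* Sketch: ask binder-control to allocate a new binder device.
 * Assumes binderfs is mounted at /dev/binderfs; the name is arbitrary.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "binder_ctl.h"    /* struct binderfs_device, BINDER_CTL_ADD */

int main(void)
{
    struct binderfs_device device;
    int fd, ret;

    memset(&device, 0, sizeof(device));
    strncpy(device.name, "my-binder", sizeof(device.name) - 1);

    fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
    if (fd < 0) {
        perror("open binder-control");
        return 1;
    }

    /* On success the kernel fills in the allocated major/minor numbers. */
    ret = ioctl(fd, BINDER_CTL_ADD, &device);
    if (ret < 0)
        perror("BINDER_CTL_ADD");
    else
        printf("created %s (%u:%u)\n", device.name,
               device.major, device.minor);

    close(fd);
    return ret < 0 ? 1 : 0;
}

With the old __u8 fields the structure is 258 bytes, with __u32 it is 264,
so the two sides would not even agree on the ioctl request value; updating
the header is what keeps android-entry and the 5.4+ in-kernel binderfs
speaking the same ABI.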
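
On the mac80211_hwsim side, the removed mac80211_hwsim.h above documents
the frame registration flow: a user-space medium simulator (wmediumd, for
example) sends HWSIM_CMD_REGISTER over generic netlink, after which the
driver hands it every transmitted frame via HWSIM_CMD_FRAME and expects
HWSIM_CMD_TX_INFO_FRAME status reports back, falling back to its built-in
perfect-channel medium once that socket is released. A rough sketch of
just the registration step, assuming libnl-3 and the "MAC80211_HWSIM"
family name used by the driver (illustrative only, not wmediumd's actual
implementation):

/* Sketch: register with mac80211_hwsim as the user-space wireless medium.
 * Assumes libnl-3; build with `pkg-config --cflags --libs libnl-genl-3.0`.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#include "mac80211_hwsim.h"    /* HWSIM_CMD_REGISTER and friends */

int main(void)
{
    struct nl_sock *sock = nl_socket_alloc();
    struct nl_msg *msg;
    int family;

    if (!sock || genl_connect(sock))
        return 1;

    /* Resolve the generic netlink family registered by the driver. */
    family = genl_ctrl_resolve(sock, "MAC80211_HWSIM");
    if (family < 0)
        return 1;

    /* HWSIM_CMD_REGISTER carries no attributes; the sender's netlink
     * port id is what the driver remembers as the frame consumer. */
    msg = nlmsg_alloc();
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                HWSIM_CMD_REGISTER, 1);
    if (nl_send_auto(sock, msg) < 0)
        return 1;
    nlmsg_free(msg);

    /* A real simulator would now loop in nl_recvmsgs_default(sock),
     * handling HWSIM_CMD_FRAME and answering with
     * HWSIM_CMD_TX_INFO_FRAME; closing the socket hands the medium
     * back to the driver. */
    nl_socket_free(sock);
    return 0;
}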