diff --git a/binder/Makefile b/binder/Makefile
index 0126060..b4ac7d8 100644
--- a/binder/Makefile
+++ b/binder/Makefile
@@ -1,7 +1,7 @@
-ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"\"" -DCONFIG_ANDROID_BINDERFS
-
-obj-m := binder_linux.o
-binder_linux-y := binder.o binder_alloc.o deps.o binderfs.o
+ccflags-y += -I$(src) -Wno-int-conversion -Wno-error=incompatible-pointer-types -DCONFIG_ANDROID_BINDER_DEVICES="\"hostbinder,hostvndbinder,hosthwbinder\"" -DCONFIG_ANDROID_BINDERFS
+obj-m := binderfs_module.o binder_module.o
+binder_module-y := deps.o binder.o binder_alloc.o
+binderfs_module-y := deps.o binderfs.o
 
 KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
 
@@ -9,7 +9,9 @@ all:
 	$(MAKE) -C $(KERNEL_SRC) V=0 M=$$PWD
 
 install:
-	cp binder_linux.ko $(DESTDIR)/
-	insmod binder_linux.ko
+	cp binder_module.ko $(DESTDIR)/
+	cp binderfs_module.ko $(DESTDIR)/
+	insmod binder_module.ko
+	insmod binderfs_module.ko
 
 clean:
 	rm -rf *.o *.ko *.mod.c *.symvers *.order .*.cmd .tmp_versions *.ur-safe
diff --git a/binder/binder.c b/binder/binder.c
index 89d715a..caa9e9e 100644
--- a/binder/binder.c
+++ b/binder/binder.c
@@ -51,6 +51,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include
 #include
 #include
 #include
@@ -70,38 +71,22 @@
 #include
 #include
 #include
-#include
-#include
 #include
-#include
-#include
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include
+#include
+#include
+#endif
 
-#include "binder.h"
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
+
+#include
 #include "binder_alloc.h"
 #include "binder_internal.h"
 #include "binder_trace.h"
-#include "deps.h"
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 16, 0)
-typedef unsigned __bitwise __poll_t;
-typedef int vm_fault_t;
-
-#define DEFINE_SHOW_ATTRIBUTE(__name) \
-static int __name ## _open(struct inode *inode, struct file *file) \
-{ \
-	return single_open(file, __name ## _show, inode->i_private); \
-} \
- \
-static const struct file_operations __name ## _fops = { \
-	.owner = THIS_MODULE, \
-	.open = __name ## _open, \
-	.read = seq_read, \
-	.llseek = seq_lseek, \
-	.release = single_release, \
-}
-
-#endif
 
 static HLIST_HEAD(binder_deferred_list);
 static DEFINE_MUTEX(binder_deferred_lock);
@@ -117,8 +102,22 @@ static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
 static atomic_t binder_last_id;
 
-static int proc_show(struct seq_file *m, void *unused);
-DEFINE_SHOW_ATTRIBUTE(proc);
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+	.owner = THIS_MODULE, \
+	.open = binder_##name##_open, \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
 
 /* This is only defined in include/asm-arm/sizes.h */
 #ifndef SZ_1K
@@ -150,9 +149,10 @@ enum {
 };
 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, 0644);
+//yangbin: temporarily disabled -- registering this module_param crashes on insmod
+//module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
 
-static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+static char *binder_devices_param = (strcmp(CONFIG_ANDROID_BINDER_DEVICES, "")) ? CONFIG_ANDROID_BINDER_DEVICES : "hostbinder,hostvndbinder,hosthwbinder";
 module_param_named(devices, binder_devices_param, charp, 0444);
 
 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
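/*
 * Editor's sketch (not part of the patch): what BINDER_DEBUG_ENTRY(proc)
 * above expands to once the ## tokens are pasted -- the standard
 * seq_file single_open() pattern from <linux/seq_file.h>.  debugfs hands
 * back inode->i_private, which binder uses to carry the target PID.
 */
static int binder_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, binder_proc_show, inode->i_private);
}

static const struct file_operations binder_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = binder_proc_open,
	.read    = seq_read,		/* seq_file does the buffering */
	.llseek  = seq_lseek,
	.release = single_release,
};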
@@ -174,18 +174,18 @@ static int binder_set_stop_on_user_error(const char *val,
 	return ret;
 }
 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-		  param_get_int, &binder_stop_on_user_error, 0644);
+		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
 
 #define binder_debug(mask, x...) \
 	do { \
 		if (binder_debug_mask & mask) \
-			pr_info_ratelimited(x); \
+			pr_info(x); \
 	} while (0)
 
 #define binder_user_error(x...) \
 	do { \
 		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
-			pr_info_ratelimited(x); \
+			pr_info(x); \
 		if (binder_stop_on_user_error) \
 			binder_stop_on_user_error = 2; \
 	} while (0)
@@ -263,7 +263,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 	unsigned int cur = atomic_inc_return(&log->cur);
 
 	if (cur >= ARRAY_SIZE(log->entry))
-		log->full = true;
+		log->full = 1;
 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
 	WRITE_ONCE(e->debug_id_done, 0);
 	/*
@@ -355,8 +355,6 @@ struct binder_error {
  *                        (invariant after initialized)
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
- * @txn_security_ctx:     require sender's security context
- *                        (invariant after initialized)
  * @async_todo:           list of async work items
  *                        (protected by @proc->inner_lock)
  *
@@ -393,7 +391,6 @@ struct binder_node {
 		 * invariant after initialization
 		 */
 		u8 accept_fds:1;
-		u8 txn_security_ctx:1;
 		u8 min_priority;
 	};
 	bool has_async_transaction;
@@ -461,8 +458,9 @@ struct binder_ref {
 };
 
 enum binder_deferred_state {
-	BINDER_DEFERRED_FLUSH        = 0x01,
-	BINDER_DEFERRED_RELEASE      = 0x02,
+	BINDER_DEFERRED_PUT_FILES    = 0x01,
+	BINDER_DEFERRED_FLUSH        = 0x02,
+	BINDER_DEFERRED_RELEASE      = 0x04,
 };
 
@@ -483,6 +481,9 @@ enum binder_deferred_state {
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
+ * @files                 files_struct for process
+ *                        (protected by @files_lock)
+ * @files_lock            mutex to protect @files
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -492,6 +493,8 @@ enum binder_deferred_state {
  *                        (protected by @inner_lock)
  * @todo:                 list of work for this process
  *                        (protected by @inner_lock)
+ * @wait:                 wait queue head to wait for proc work
+ *                        (invariant after initialized)
  * @stats:                per-process binder statistics
  *                        (atomics, no lock needed)
  * @delivered_death:      list of delivered death notification
@@ -527,11 +530,14 @@ struct binder_proc {
 	struct list_head waiting_threads;
 	int pid;
 	struct task_struct *tsk;
+	struct files_struct *files;
+	struct mutex files_lock;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
 	bool is_dead;
 
 	struct list_head todo;
+	wait_queue_head_t wait;
 	struct binder_stats stats;
 	struct list_head delivered_death;
 	int max_threads;
@@ -573,8 +579,6 @@ enum {
  *                        (protected by @proc->inner_lock)
  * @todo:                 list of work to do for this thread
  *                        (protected by @proc->inner_lock)
- * @process_todo:         whether work in @todo should be processed
- *                        (protected by @proc->inner_lock)
  * @return_error:         transaction errors reported by this thread
  *                        (only accessed by this thread)
  * @reply_error:          transaction errors reported by target thread
@@ -600,7 +604,6 @@ struct binder_thread {
 	bool looper_need_return; /* can be written by other thread */
 	struct binder_transaction *transaction_stack;
 	struct list_head todo;
-	bool process_todo;
 	struct binder_error return_error;
 	struct binder_error reply_error;
 	wait_queue_head_t wait;
@@ -609,23 +612,6 @@ struct binder_thread {
 	bool is_dead;
 };
 
-/**
- * struct binder_txn_fd_fixup - transaction fd fixup list element
- * @fixup_entry:    list entry
- * @file:           struct file to be associated with new fd
- * @offset:         offset in buffer data to this fixup
- *
- * List element for fd fixups in a transaction. Since file
- * descriptors need to be allocated in the context of the
- * target process, we pass each fd to be processed in this
- * struct.
- */
-struct binder_txn_fd_fixup {
-	struct list_head fixup_entry;
-	struct file *file;
-	size_t offset;
-};
-
 struct binder_transaction {
 	int debug_id;
 	struct binder_work work;
@@ -643,8 +629,6 @@ struct binder_transaction {
 	long priority;
 	long saved_priority;
 	kuid_t sender_euid;
-	struct list_head fd_fixups;
-	binder_uintptr_t security_ctx;
 	/**
 	 * @lock: protects @from, @to_proc, and @to_thread
 	 *
@@ -664,7 +648,6 @@ struct binder_transaction {
 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
 static void
 _binder_proc_lock(struct binder_proc *proc, int line)
-	__acquires(&proc->outer_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -680,7 +663,6 @@ _binder_proc_lock(struct binder_proc *proc, int line)
 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
 static void
 _binder_proc_unlock(struct binder_proc *proc, int line)
-	__releases(&proc->outer_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -696,7 +678,6 @@ _binder_proc_unlock(struct binder_proc *proc, int line)
 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
 static void
 _binder_inner_proc_lock(struct binder_proc *proc, int line)
-	__acquires(&proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -712,7 +693,6 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line)
 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
 static void
 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
-	__releases(&proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -728,7 +708,6 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line)
 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
 static void
 _binder_node_lock(struct binder_node *node, int line)
-	__acquires(&node->lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -744,7 +723,6 @@ _binder_node_lock(struct binder_node *node, int line)
 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
 static void
 _binder_node_unlock(struct binder_node *node, int line)
-	__releases(&node->lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
@@ -761,16 +739,12 @@ _binder_node_unlock(struct binder_node *node, int line)
 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
 static void
 _binder_node_inner_lock(struct binder_node *node, int line)
-	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
 {
 	binder_debug(BINDER_DEBUG_SPINLOCKS,
 		     "%s: line=%d\n", __func__, line);
 	spin_lock(&node->lock);
 	if (node->proc)
 		binder_inner_proc_lock(node->proc);
-	else
-		/* annotation for sparse */
-		__acquire(&node->proc->inner_lock);
 }
 
 /**
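/*
 * Editor's note (not part of the patch): the __acquires/__releases and
 * __acquire/__release markers stripped in the hunks above and below are
 * sparse static-analysis annotations only.  Outside a sparse run the
 * kernel defines them away roughly as in this simplified sketch of the
 * compiler-header stubs, so removing them changes no generated code --
 * it only drops sparse's "context imbalance" lock checking.
 */
#ifndef __CHECKER__
#define __acquires(x)
#define __releases(x)
#define __acquire(x)	(void)0
#define __release(x)	(void)0
#endif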
@@ -782,7 +756,6 @@ _binder_node_inner_lock(struct binder_node *node, int line)
 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
 static void
 _binder_node_inner_unlock(struct binder_node *node, int line)
-	__releases(&node->lock) __releases(&node->proc->inner_lock)
 {
 	struct binder_proc *proc = node->proc;
 
@@ -790,9 +763,6 @@ _binder_node_inner_unlock(struct binder_node *node, int line)
 		     "%s: line=%d\n", __func__, line);
 	if (proc)
 		binder_inner_proc_unlock(proc);
-	else
-		/* annotation for sparse */
-		__release(&node->proc->inner_lock);
 	spin_unlock(&node->lock);
 }
 
@@ -819,16 +789,6 @@ static bool binder_worklist_empty(struct binder_proc *proc,
 	return ret;
 }
 
-/**
- * binder_enqueue_work_ilocked() - Add an item to the work list
- * @work:         struct binder_work to add to list
- * @target_list:  list to add work to
- *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
- *
- * Requires the proc->inner_lock to be held.
- */
 static void
 binder_enqueue_work_ilocked(struct binder_work *work,
 			    struct list_head *target_list)
@@ -839,58 +799,22 @@ binder_enqueue_work_ilocked(struct binder_work *work,
 }
 
 /**
- * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
- * @thread:       thread to queue work to
- * @work:         struct binder_work to add to list
- *
- * Adds the work to the todo list of the thread. Doesn't set the process_todo
- * flag, which means that (if it wasn't already set) the thread will go to
- * sleep without handling this work when it calls read.
- *
- * Requires the proc->inner_lock to be held.
- */
-static void
-binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
-					    struct binder_work *work)
-{
-	WARN_ON(!list_empty(&thread->waiting_thread_node));
-	binder_enqueue_work_ilocked(work, &thread->todo);
-}
-
-/**
- * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
- * @thread:       thread to queue work to
- * @work:         struct binder_work to add to list
- *
- * Adds the work to the todo list of the thread, and enables processing
- * of the todo queue.
- *
- * Requires the proc->inner_lock to be held.
- */
-static void
-binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
-				   struct binder_work *work)
-{
-	WARN_ON(!list_empty(&thread->waiting_thread_node));
-	binder_enqueue_work_ilocked(work, &thread->todo);
-	thread->process_todo = true;
-}
-
-/**
- * binder_enqueue_thread_work() - Add an item to the thread work list
- * @thread:       thread to queue work to
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
 *
- * Adds the work to the todo list of the thread, and enables processing
- * of the todo queue.
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
 */
 static void
-binder_enqueue_thread_work(struct binder_thread *thread,
-			   struct binder_work *work)
+binder_enqueue_work(struct binder_proc *proc,
+		    struct binder_work *work,
+		    struct list_head *target_list)
 {
-	binder_inner_proc_lock(thread->proc);
-	binder_enqueue_thread_work_ilocked(thread, work);
-	binder_inner_proc_unlock(thread->proc);
+	binder_inner_proc_lock(proc);
+	binder_enqueue_work_ilocked(work, target_list);
+	binder_inner_proc_unlock(proc);
 }
 
 static void
@@ -953,10 +877,70 @@ static void binder_free_thread(struct binder_thread *thread);
 static void binder_free_proc(struct binder_proc *proc);
 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
+static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+	unsigned long rlim_cur;
+	unsigned long irqs;
+	int ret;
+
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		ret = -ESRCH;
+		goto err;
+	}
+	if (!lock_task_sighand(proc->tsk, &irqs)) {
+		ret = -EMFILE;
+		goto err;
+	}
+	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
+	unlock_task_sighand(proc->tsk, &irqs);
+
+	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+	mutex_unlock(&proc->files_lock);
+	return ret;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+	struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+	mutex_lock(&proc->files_lock);
+	if (proc->files)
+		__fd_install(proc->files, fd, file);
+	mutex_unlock(&proc->files_lock);
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+	int retval;
+
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		retval = -ESRCH;
+		goto err;
+	}
+	retval = __close_fd(proc->files, fd);
+	/* can't restart close syscall because file table entry was cleared */
+	if (unlikely(retval == -ERESTARTSYS ||
+		     retval == -ERESTARTNOINTR ||
+		     retval == -ERESTARTNOHAND ||
+		     retval == -ERESTART_RESTARTBLOCK))
+		retval = -EINTR;
+err:
+	mutex_unlock(&proc->files_lock);
+	return retval;
+}
+
 static bool binder_has_work_ilocked(struct binder_thread *thread,
 				    bool do_proc_work)
 {
-	return thread->process_todo ||
+	return !binder_worklist_empty_ilocked(&thread->todo) ||
 		thread->looper_need_return ||
 		(do_proc_work &&
 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1182,7 +1166,6 @@ static struct binder_node *binder_init_node_ilocked(
 	node->work.type = BINDER_WORK_NODE;
 	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
-	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
 	INIT_LIST_HEAD(&node->async_todo);
@@ -1244,12 +1227,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 		} else
 			node->local_strong_refs++;
 		if (!node->has_strong_ref && target_list) {
-			struct binder_thread *thread = container_of(target_list,
-					struct binder_thread, todo);
 			binder_dequeue_work_ilocked(&node->work);
-			BUG_ON(&thread->todo != target_list);
-			binder_enqueue_deferred_thread_work_ilocked(thread,
-								    &node->work);
+			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	} else {
 		if (!internal)
@@ -1260,9 +1239,6 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 					node->debug_id);
 				return -EINVAL;
 			}
-			/*
-			 * See comment above
-			 */
 			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	}
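/*
 * Editor's sketch (not part of the patch): how the three files_struct
 * helpers added above cooperate when a sender's fd is duplicated into
 * the receiving process.  example_dup_fd_into() is hypothetical; the
 * real caller is binder_translate_fd() later in this patch.
 */
static int example_dup_fd_into(struct binder_proc *target_proc, int sender_fd)
{
	struct file *file = fget(sender_fd);	/* ref taken in sender context */
	int target_fd;

	if (!file)
		return -EBADF;
	/* reserve a slot in the *target* process's file table */
	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		fput(file);
		return target_fd;
	}
	/* publish the struct file in the target's table; consumes the ref */
	task_fd_install(target_proc, target_fd, file);
	return target_fd;
}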
@@ -1403,14 +1379,10 @@ static void binder_dec_node_tmpref(struct binder_node *node)
 	binder_node_inner_lock(node);
 	if (!node->proc)
 		spin_lock(&binder_dead_nodes_lock);
-	else
-		__acquire(&binder_dead_nodes_lock);
 	node->tmp_refs--;
 	BUG_ON(node->tmp_refs < 0);
 	if (!node->proc)
 		spin_unlock(&binder_dead_nodes_lock);
-	else
-		__release(&binder_dead_nodes_lock);
 	/*
 	 * Call binder_dec_node() to check if all refcounts are 0
 	 * and cleanup is needed. Calling with strong=0 and internal=1
@@ -1913,62 +1885,26 @@ static struct binder_thread *binder_get_txn_from(
  */
 static struct binder_thread *binder_get_txn_from_and_acq_inner(
 		struct binder_transaction *t)
-	__acquires(&t->from->proc->inner_lock)
 {
 	struct binder_thread *from;
 
 	from = binder_get_txn_from(t);
-	if (!from) {
-		__acquire(&from->proc->inner_lock);
+	if (!from)
 		return NULL;
-	}
 	binder_inner_proc_lock(from->proc);
 	if (t->from) {
 		BUG_ON(from != t->from);
 		return from;
 	}
 	binder_inner_proc_unlock(from->proc);
-	__acquire(&from->proc->inner_lock);
 	binder_thread_dec_tmpref(from);
 	return NULL;
 }
 
-/**
- * binder_free_txn_fixups() - free unprocessed fd fixups
- * @t:	binder transaction for t->from
- *
- * If the transaction is being torn down prior to being
- * processed by the target process, free all of the
- * fd fixups and fput the file structs. It is safe to
- * call this function after the fixups have been
- * processed -- in that case, the list will be empty.
- */
-static void binder_free_txn_fixups(struct binder_transaction *t)
-{
-	struct binder_txn_fd_fixup *fixup, *tmp;
-
-	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
-		fput(fixup->file);
-		list_del(&fixup->fixup_entry);
-		kfree(fixup);
-	}
-}
-
 static void binder_free_transaction(struct binder_transaction *t)
 {
-	struct binder_proc *target_proc = t->to_proc;
-
-	if (target_proc) {
-		binder_inner_proc_lock(target_proc);
-		if (t->buffer)
-			t->buffer->transaction = NULL;
-		binder_inner_proc_unlock(target_proc);
-	}
-	/*
-	 * If the transaction has no target_proc, then
-	 * t->buffer->transaction has already been cleared.
-	 */
-	binder_free_txn_fixups(t);
+	if (t->buffer)
+		t->buffer->transaction = NULL;
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -1992,26 +1928,18 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 			binder_pop_transaction_ilocked(target_thread, t);
 			if (target_thread->reply_error.cmd == BR_OK) {
 				target_thread->reply_error.cmd = error_code;
-				binder_enqueue_thread_work_ilocked(
-					target_thread,
-					&target_thread->reply_error.work);
+				binder_enqueue_work_ilocked(
+					&target_thread->reply_error.work,
+					&target_thread->todo);
 				wake_up_interruptible(&target_thread->wait);
 			} else {
-				/*
-				 * Cannot get here for normal operation, but
-				 * we can if multiple synchronous transactions
-				 * are sent without blocking for responses.
-				 * Just ignore the 2nd error in this case.
-				 */
-				pr_warn("Unexpected reply error: %u\n",
-					target_thread->reply_error.cmd);
+				WARN(1, "Unexpected reply error: %u\n",
+				     target_thread->reply_error.cmd);
 			}
 			binder_inner_proc_unlock(target_thread->proc);
 			binder_thread_dec_tmpref(target_thread);
 			binder_free_transaction(t);
 			return;
-		} else {
-			__release(&target_thread->proc->inner_lock);
 		}
 		next = t->from_parent;
@@ -2066,8 +1994,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
 	struct binder_object_header *hdr;
 	size_t object_size = 0;
 
-	if (buffer->data_size < sizeof(*hdr) ||
-	    offset > buffer->data_size - sizeof(*hdr) ||
+	if (offset > buffer->data_size - sizeof(*hdr) ||
+	    buffer->data_size < sizeof(*hdr) ||
 	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 
@@ -2199,64 +2127,6 @@ static bool binder_validate_fixup(struct binder_buffer *b,
 	return (fixup_offset >= last_min_offset);
 }
 
-/**
- * struct binder_task_work_cb - for deferred close
- *
- * @twork:                callback_head for task work
- * @fd:                   fd to close
- *
- * Structure to pass task work to be handled after
- * returning from binder_ioctl() via task_work_add().
- */
-struct binder_task_work_cb {
-	struct callback_head twork;
-	struct file *file;
-};
-
-/**
- * binder_do_fd_close() - close list of file descriptors
- * @twork:	callback head for task work
- *
- * It is not safe to call ksys_close() during the binder_ioctl()
- * function if there is a chance that binder's own file descriptor
- * might be closed. This is to meet the requirements for using
- * fdget() (see comments for __fget_light()). Therefore use
- * task_work_add() to schedule the close operation once we have
- * returned from binder_ioctl(). This function is a callback
- * for that mechanism and does the actual ksys_close() on the
- * given file descriptor.
- */
-static void binder_do_fd_close(struct callback_head *twork)
-{
-	struct binder_task_work_cb *twcb = container_of(twork,
-			struct binder_task_work_cb, twork);
-
-	fput(twcb->file);
-	kfree(twcb);
-}
-
-/**
- * binder_deferred_fd_close() - schedule a close for the given file-descriptor
- * @fd:		file-descriptor to close
- *
- * See comments in binder_do_fd_close(). This function is used to schedule
- * a file-descriptor to be closed after returning from binder_ioctl().
- */
-static void binder_deferred_fd_close(int fd)
-{
-	struct binder_task_work_cb *twcb;
-
-	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
-	if (!twcb)
-		return;
-	init_task_work(&twcb->twork, binder_do_fd_close);
-	__close_fd_get_file_compat(fd, &twcb->file);
-	if (twcb->file)
-		task_work_add(current, &twcb->twork, true);
-	else
-		kfree(twcb);
-}
-
 static void binder_transaction_buffer_release(struct binder_proc *proc,
 					      struct binder_buffer *buffer,
 					      binder_size_t *failed_at)
@@ -2265,7 +2135,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 	int debug_id = buffer->debug_id;
 
 	binder_debug(BINDER_DEBUG_TRANSACTION,
-		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
+		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
 		     proc->pid, buffer->debug_id,
 		     buffer->data_size, buffer->offsets_size, failed_at);
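/*
 * Editor's note (not part of the patch): on the reordered checks in the
 * binder_validate_object() hunk above.  Both operands are unsigned, so
 * "data_size - sizeof(*hdr)" wraps to a huge value whenever
 * data_size < sizeof(*hdr); with the offset test evaluated first, that
 * clause can spuriously pass, and only the following size clause still
 * rejects the buffer.  A tiny worked illustration (hypothetical helper):
 */
static bool example_hdr_fits(size_t data_size, u64 offset, size_t hdr_size)
{
	/* e.g. data_size = 4, hdr_size = 8: 4 - 8 wraps to SIZE_MAX - 3,
	 * so "offset > data_size - hdr_size" is false for small offsets,
	 * and the explicit "data_size < hdr_size" clause must catch it. */
	if (offset > data_size - hdr_size ||
	    data_size < hdr_size)
		return false;
	return true;
}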
@@ -2329,17 +2199,12 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 		} break;
 		case BINDER_TYPE_FD: {
-			/*
-			 * No need to close the file here since user-space
-			 * closes it for for successfully delivered
-			 * transactions. For transactions that weren't
-			 * delivered, the new fd was never allocated so
-			 * there is no need to close and the fput on the
-			 * file is done when the transaction is torn
-			 * down.
-			 */
-			WARN_ON(failed_at &&
-				proc->tsk == current->group_leader);
+			struct binder_fd_object *fp = to_binder_fd_object(hdr);
+
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        fd %d\n", fp->fd);
+			if (failed_at)
+				task_close_fd(proc, fp->fd);
 		} break;
 		case BINDER_TYPE_PTR:
 			/*
@@ -2355,21 +2220,12 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			size_t fd_index;
 			binder_size_t fd_buf_size;
 
-			if (proc->tsk != current->group_leader) {
-				/*
-				 * Nothing to do if running in sender context
-				 * The fd fixups have not been applied so no
-				 * fds need to be closed.
-				 */
-				continue;
-			}
-
 			fda = to_binder_fd_array_object(hdr);
 			parent = binder_validate_ptr(buffer, fda->parent,
 						     off_start,
 						     offp - off_start);
 			if (!parent) {
-				pr_err("transaction release %d bad parent offset\n",
+				pr_err("transaction release %d bad parent offset",
 				       debug_id);
 				continue;
 			}
@@ -2396,7 +2252,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			}
 			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
-				binder_deferred_fd_close(fd_array[fd_index]);
+				task_close_fd(proc, fd_array[fd_index]);
 		} break;
 		default:
 			pr_err("transaction release %d bad object type %x\n",
@@ -2491,15 +2347,11 @@ static int binder_translate_handle(struct flat_binder_object *fp,
 		fp->cookie = node->cookie;
 		if (node->proc)
 			binder_inner_proc_lock(node->proc);
-		else
-			__acquire(&node->proc->inner_lock);
 		binder_inc_node_nilocked(node,
 					 fp->hdr.type == BINDER_TYPE_BINDER,
 					 0, NULL);
 		if (node->proc)
 			binder_inner_proc_unlock(node->proc);
-		else
-			__release(&node->proc->inner_lock);
 		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 			     "        ref %d desc %d -> node %d u%016llx\n",
@@ -2532,18 +2384,17 @@ static int binder_translate_handle(struct flat_binder_object *fp,
 	return ret;
 }
 
-static int binder_translate_fd(u32 *fdp,
+static int binder_translate_fd(int fd,
 			       struct binder_transaction *t,
 			       struct binder_thread *thread,
 			       struct binder_transaction *in_reply_to)
 {
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
-	struct binder_txn_fd_fixup *fixup;
+	int target_fd;
 	struct file *file;
-	int ret = 0;
+	int ret;
 	bool target_allows_fd;
-	int fd = *fdp;
 
 	if (in_reply_to)
 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2571,24 +2422,19 @@ static int binder_translate_fd(u32 *fdp,
 		goto err_security;
 	}
 
-	/*
-	 * Add fixup record for this transaction. The allocation
-	 * of the fd in the target needs to be done from a
-	 * target thread.
-	 */
-	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
-	if (!fixup) {
+	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+	if (target_fd < 0) {
 		ret = -ENOMEM;
-		goto err_alloc;
+		goto err_get_unused_fd;
 	}
-	fixup->file = file;
-	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
-	trace_binder_transaction_fd_send(t, fd, fixup->offset);
-	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
+	task_fd_install(target_proc, target_fd, file);
+	trace_binder_transaction_fd(t, fd, target_fd);
+	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
+		     fd, target_fd);
 
-	return ret;
+	return target_fd;
 
-err_alloc:
+err_get_unused_fd:
 err_security:
 	fput(file);
 err_fget:
@@ -2602,7 +2448,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 				     struct binder_thread *thread,
 				     struct binder_transaction *in_reply_to)
 {
-	binder_size_t fdi, fd_buf_size;
+	binder_size_t fdi, fd_buf_size, num_installed_fds;
+	int target_fd;
 	uintptr_t parent_buffer;
 	u32 *fd_array;
 	struct binder_proc *proc = thread->proc;
@@ -2634,12 +2481,23 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 		return -EINVAL;
 	}
 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
-		int ret = binder_translate_fd(&fd_array[fdi], t, thread,
+		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
 						in_reply_to);
-		if (ret < 0)
-			return ret;
+		if (target_fd < 0)
+			goto err_translate_fd_failed;
+		fd_array[fdi] = target_fd;
 	}
 	return 0;
+
+err_translate_fd_failed:
+	/*
+	 * Failed to allocate fd or security error, free fds
+	 * installed so far.
+	 */
+	num_installed_fds = fdi;
+	for (fdi = 0; fdi < num_installed_fds; fdi++)
+		task_close_fd(target_proc, fd_array[fdi]);
+	return target_fd;
 }
 
 static int binder_fixup_parent(struct binder_transaction *t,
@@ -2711,18 +2569,20 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_proc *proc,
 				    struct binder_thread *thread)
 {
+	struct list_head *target_list = NULL;
 	struct binder_node *node = t->buffer->target_node;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
-	bool pending_async = false;
+	bool wakeup = true;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
 	if (oneway) {
 		BUG_ON(thread);
 		if (node->has_async_transaction) {
-			pending_async = true;
+			target_list = &node->async_todo;
+			wakeup = false;
 		} else {
-			node->has_async_transaction = true;
+			node->has_async_transaction = 1;
 		}
 	}
 
@@ -2734,17 +2594,19 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 		return false;
 	}
 
-	if (!thread && !pending_async)
+	if (!thread && !target_list)
 		thread = binder_select_thread_ilocked(proc);
 
 	if (thread)
-		binder_enqueue_thread_work_ilocked(thread, &t->work);
-	else if (!pending_async)
-		binder_enqueue_work_ilocked(&t->work, &proc->todo);
+		target_list = &thread->todo;
+	else if (!target_list)
+		target_list = &proc->todo;
 	else
-		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
+		BUG_ON(target_list != &node->async_todo);
+
+	binder_enqueue_work_ilocked(&t->work, target_list);
 
-	if (!pending_async)
+	if (wakeup)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
 	binder_inner_proc_unlock(proc);
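/*
 * Editor's sketch (not part of the patch): the queueing decision encoded
 * by target_list/wakeup in the binder_proc_transaction() hunk above.
 * pick_target_list() is a hypothetical restatement for readability.
 */
static struct list_head *pick_target_list(struct binder_node *node,
					  struct binder_thread *thread,
					  struct binder_proc *proc,
					  bool oneway, bool *wakeup)
{
	*wakeup = true;
	if (oneway && node->has_async_transaction) {
		/* an async buffer is still outstanding: park the work; it is
		 * dispatched when BC_FREE_BUFFER returns that buffer */
		*wakeup = false;
		return &node->async_todo;
	}
	if (oneway)
		node->has_async_transaction = 1;
	if (thread)
		return &thread->todo;	/* a waiting thread was selected */
	return &proc->todo;		/* any looper will pick it up later */
}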
@@ -2802,7 +2664,6 @@ static void binder_transaction(struct binder_proc *proc,
 {
 	int ret;
 	struct binder_transaction *t;
-	struct binder_work *w;
 	struct binder_work *tcomplete;
 	binder_size_t *offp, *off_end, *off_start;
 	binder_size_t off_min;
@@ -2819,8 +2680,6 @@ static void binder_transaction(struct binder_proc *proc,
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
 	int t_debug_id = atomic_inc_return(&binder_last_id);
-	char *secctx = NULL;
-	u32 secctx_sz = 0;
 
 	e = binder_transaction_log_add(&binder_transaction_log);
 	e->debug_id = t_debug_id;
@@ -2865,8 +2724,6 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_set_nice(in_reply_to->saved_priority);
 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
 		if (target_thread == NULL) {
-			/* annotation for sparse */
-			__release(&target_thread->proc->inner_lock);
 			return_error = BR_DEAD_REPLY;
 			return_error_line = __LINE__;
 			goto err_dead_binder;
@@ -2922,14 +2779,6 @@ static void binder_transaction(struct binder_proc *proc,
 			else
 				return_error = BR_DEAD_REPLY;
 			mutex_unlock(&context->context_mgr_node_lock);
-			if (target_node && target_proc->pid == proc->pid) {
-				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
-						  proc->pid, thread->pid);
-				return_error = BR_FAILED_REPLY;
-				return_error_param = -EINVAL;
-				return_error_line = __LINE__;
-				goto err_invalid_target_handle;
-			}
 		}
 		if (!target_node) {
 			/*
@@ -2948,29 +2797,6 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_invalid_target_handle;
 		}
 		binder_inner_proc_lock(proc);
-
-		w = list_first_entry_or_null(&thread->todo,
-					     struct binder_work, entry);
-		if (!(tr->flags & TF_ONE_WAY) && w &&
-		    w->type == BINDER_WORK_TRANSACTION) {
-			/*
-			 * Do not allow new outgoing transaction from a
-			 * thread that has a transaction at the head of
-			 * its todo list. Only need to check the head
-			 * because binder_select_thread_ilocked picks a
-			 * thread from proc->waiting_threads to enqueue
-			 * the transaction, and nothing is queued to the
-			 * todo list while the thread is on waiting_threads.
-			 */
-			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
-					  proc->pid, thread->pid);
-			binder_inner_proc_unlock(proc);
-			return_error = BR_FAILED_REPLY;
-			return_error_param = -EPROTO;
-			return_error_line = __LINE__;
-			goto err_bad_todo_list;
-		}
-
 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
 			struct binder_transaction *tmp;
@@ -3018,7 +2844,6 @@ static void binder_transaction(struct binder_proc *proc,
 		return_error_line = __LINE__;
 		goto err_alloc_t_failed;
 	}
-	INIT_LIST_HEAD(&t->fd_fixups);
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 	spin_lock_init(&t->lock);
@@ -3063,25 +2888,6 @@ static void binder_transaction(struct binder_proc *proc,
 	t->flags = tr->flags;
 	t->priority = task_nice(current);
-
-// Remove the security check for kernel under 5.0
-/*
-
-	if (target_node && target_node->txn_security_ctx) {
-		u32 secid;
-
-		security_task_getsecid(proc->tsk, &secid);
-		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
-		if (ret) {
-			return_error = BR_FAILED_REPLY;
-			return_error_param = ret;
-			return_error_line = __LINE__;
-			goto err_get_secctx_failed;
-		}
-		extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
-	}
-*/
-
 	trace_binder_transaction(reply, t, target_node);
 
 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3098,19 +2904,7 @@ static void binder_transaction(struct binder_proc *proc,
 		t->buffer = NULL;
 		goto err_binder_alloc_buf_failed;
 	}
-	if (secctx) {
-		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
-				    ALIGN(tr->offsets_size, sizeof(void *)) +
-				    ALIGN(extra_buffers_size, sizeof(void *)) -
-				    ALIGN(secctx_sz, sizeof(u64));
-		char *kptr = t->buffer->data + buf_offset;
-
-		t->security_ctx = (uintptr_t)kptr +
-		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-		memcpy(kptr, secctx, secctx_sz);
-		security_release_secctx(secctx, secctx_sz);
-		secctx = NULL;
-	}
+	t->buffer->allow_user_free = 0;
 	t->buffer->debug_id = t->debug_id;
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
@@ -3205,16 +2999,17 @@ static void binder_transaction(struct binder_proc *proc,
 		case BINDER_TYPE_FD: {
 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
-			int ret = binder_translate_fd(&fp->fd, t, thread,
-						      in_reply_to);
+			int target_fd = binder_translate_fd(fp->fd, t, thread,
+							    in_reply_to);
 
-			if (ret < 0) {
+			if (target_fd < 0) {
 				return_error = BR_FAILED_REPLY;
-				return_error_param = ret;
+				return_error_param = target_fd;
 				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 			fp->pad_binder = 0;
+			fp->fd = target_fd;
 		} break;
 		case BINDER_TYPE_FDA: {
 			struct binder_fd_array_object *fda =
@@ -3306,10 +3101,10 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 	}
 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	binder_enqueue_work(proc, tcomplete, &thread->todo);
 	t->work.type = BINDER_WORK_TRANSACTION;
 
 	if (reply) {
-		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
 		if (target_thread->is_dead) {
 			binder_inner_proc_unlock(target_proc);
@@ -3317,21 +3112,13 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
-		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_inner_proc_lock(proc);
-		/*
-		 * Defer the TRANSACTION_COMPLETE, so we don't return to
-		 * userspace immediately; this allows the target process to
-		 * immediately start processing this transaction, reducing
-		 * latency. We will then return the TRANSACTION_COMPLETE when
-		 * the target replies (or there is an error).
-		 */
-		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
 		t->need_reply = 1;
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
@@ -3345,7 +3132,6 @@ static void binder_transaction(struct binder_proc *proc,
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
-		binder_enqueue_thread_work(thread, tcomplete);
 		if (!binder_proc_transaction(t, target_proc, NULL))
 			goto err_dead_proc_or_thread;
 	}
@@ -3371,7 +3157,6 @@ static void binder_transaction(struct binder_proc *proc,
 err_bad_offset:
 err_bad_parent:
 err_copy_data_failed:
-	binder_free_txn_fixups(t);
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
 	if (target_node)
@@ -3380,18 +3165,12 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
-	if (secctx)
-		security_release_secctx(secctx, secctx_sz);
-/*
-err_get_secctx_failed:
 	kfree(tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-*/
 err_alloc_tcomplete_failed:
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 err_alloc_t_failed:
-err_bad_todo_list:
 err_bad_call_stack:
 err_empty_call_stack:
 err_dead_binder:
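/*
 * Editor's sketch (not part of the patch): the userspace side of the
 * BC_FREE_BUFFER command whose kernel handling is reworked in the
 * binder_thread_write() hunks just below.  After consuming a received
 * transaction, the process returns the buffer; that is also the moment
 * the kernel dispatches the next entry on node->async_todo.  Minimal
 * ioctl usage with error handling elided; "fd" is assumed to be an open
 * binder device and "buf_ptr" the data.ptr.buffer value from the
 * received binder_transaction_data.
 */
static void example_free_received_buffer(int fd, binder_uintptr_t buf_ptr)
{
	struct {
		uint32_t cmd;			/* BC_FREE_BUFFER */
		binder_uintptr_t buffer;	/* buffer being returned */
	} __attribute__((packed)) wr = { BC_FREE_BUFFER, buf_ptr };
	struct binder_write_read bwr = {
		.write_size   = sizeof(wr),
		.write_buffer = (binder_uintptr_t)&wr,
	};

	ioctl(fd, BINDER_WRITE_READ, &bwr);	/* kernel runs the hunk below */
}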
@@ -3431,57 +3210,18 @@ static void binder_transaction(struct binder_proc *proc,
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
-		binder_enqueue_thread_work(thread, &thread->return_error.work);
+		binder_enqueue_work(thread->proc,
+				    &thread->return_error.work,
+				    &thread->todo);
 		binder_send_failed_reply(in_reply_to, return_error);
 	} else {
 		thread->return_error.cmd = return_error;
-		binder_enqueue_thread_work(thread, &thread->return_error.work);
+		binder_enqueue_work(thread->proc,
+				    &thread->return_error.work,
+				    &thread->todo);
 	}
 }
 
-/**
- * binder_free_buf() - free the specified buffer
- * @proc:	binder proc that owns buffer
- * @buffer:	buffer to be freed
- *
- * If buffer for an async transaction, enqueue the next async
- * transaction from the node.
- *
- * Cleanup buffer and free it.
- */
-static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
-{
-	binder_inner_proc_lock(proc);
-	if (buffer->transaction) {
-		buffer->transaction->buffer = NULL;
-		buffer->transaction = NULL;
-	}
-	binder_inner_proc_unlock(proc);
-	if (buffer->async_transaction && buffer->target_node) {
-		struct binder_node *buf_node;
-		struct binder_work *w;
-
-		buf_node = buffer->target_node;
-		binder_node_inner_lock(buf_node);
-		BUG_ON(!buf_node->has_async_transaction);
-		BUG_ON(buf_node->proc != proc);
-		w = binder_dequeue_work_head_ilocked(
-				&buf_node->async_todo);
-		if (!w) {
-			buf_node->has_async_transaction = false;
-		} else {
-			binder_enqueue_work_ilocked(
-					w, &proc->todo);
-			binder_wakeup_proc_ilocked(proc);
-		}
-		binder_node_inner_unlock(buf_node);
-	}
-	trace_binder_transaction_buffer_release(buffer);
-	binder_transaction_buffer_release(proc, buffer, NULL);
-	binder_alloc_free_buf(&proc->alloc, buffer);
-}
-
 static int binder_thread_write(struct binder_proc *proc,
 			struct binder_thread *thread,
 			binder_uintptr_t binder_buffer, size_t size,
@@ -3653,18 +3393,14 @@ static int binder_thread_write(struct binder_proc *proc,
 			buffer = binder_alloc_prepare_to_free(&proc->alloc,
 							      data_ptr);
-			if (IS_ERR_OR_NULL(buffer)) {
-				if (PTR_ERR(buffer) == -EPERM) {
-					binder_user_error(
-						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
-						proc->pid, thread->pid,
-						(u64)data_ptr);
-				} else {
-					binder_user_error(
-						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
-						proc->pid, thread->pid,
-						(u64)data_ptr);
-				}
+			if (buffer == NULL) {
+				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+					proc->pid, thread->pid, (u64)data_ptr);
+				break;
+			}
+			if (!buffer->allow_user_free) {
+				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+					proc->pid, thread->pid, (u64)data_ptr);
 				break;
 			}
 			binder_debug(BINDER_DEBUG_FREE_BUFFER,
@@ -3672,7 +3408,33 @@ static int binder_thread_write(struct binder_proc *proc,
 				     proc->pid, thread->pid, (u64)data_ptr,
 				     buffer->debug_id,
 				     buffer->transaction ? "active" : "finished");
-			binder_free_buf(proc, buffer);
+
+			if (buffer->transaction) {
+				buffer->transaction->buffer = NULL;
+				buffer->transaction = NULL;
+			}
+			if (buffer->async_transaction && buffer->target_node) {
+				struct binder_node *buf_node;
+				struct binder_work *w;
+
+				buf_node = buffer->target_node;
+				binder_node_inner_lock(buf_node);
+				BUG_ON(!buf_node->has_async_transaction);
+				BUG_ON(buf_node->proc != proc);
+				w = binder_dequeue_work_head_ilocked(
+						&buf_node->async_todo);
+				if (!w) {
+					buf_node->has_async_transaction = 0;
+				} else {
+					binder_enqueue_work_ilocked(
+							w, &proc->todo);
+					binder_wakeup_proc_ilocked(proc);
+				}
+				binder_node_inner_unlock(buf_node);
+			}
+			trace_binder_transaction_buffer_release(buffer);
+			binder_transaction_buffer_release(proc, buffer, NULL);
+			binder_alloc_free_buf(&proc->alloc, buffer);
 			break;
 		}
 
@@ -3760,9 +3522,10 @@ static int binder_thread_write(struct binder_proc *proc,
 					WARN_ON(thread->return_error.cmd !=
 						BR_OK);
 					thread->return_error.cmd = BR_ERROR;
-					binder_enqueue_thread_work(
-						thread,
-						&thread->return_error.work);
+					binder_enqueue_work(
+						thread->proc,
+						&thread->return_error.work,
+						&thread->todo);
 					binder_debug(
 						BINDER_DEBUG_FAILED_TRANSACTION,
 						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3842,9 +3605,9 @@ static int binder_thread_write(struct binder_proc *proc,
 					if (thread->looper &
 					    (BINDER_LOOPER_STATE_REGISTERED |
 					     BINDER_LOOPER_STATE_ENTERED))
-						binder_enqueue_thread_work_ilocked(
-								thread,
-								&death->work);
+						binder_enqueue_work_ilocked(
+								&death->work,
+								&thread->todo);
 					else {
 						binder_enqueue_work_ilocked(
 								&death->work,
@@ -3884,7 +3647,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				}
 			}
 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
-				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
+				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
 				     proc->pid, thread->pid, (u64)cookie,
 				     death);
 			if (death == NULL) {
@@ -3899,8 +3662,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (thread->looper &
 					(BINDER_LOOPER_STATE_REGISTERED |
 					 BINDER_LOOPER_STATE_ENTERED))
-					binder_enqueue_thread_work_ilocked(
-						thread, &death->work);
+					binder_enqueue_work_ilocked(
+						&death->work, &thread->todo);
 				else {
 					binder_enqueue_work_ilocked(
 							&death->work,
@@ -3995,76 +3758,6 @@ static int binder_wait_for_work(struct binder_thread *thread,
 	return ret;
 }
 
-/**
- * binder_apply_fd_fixups() - finish fd translation
- * @t:	binder transaction with list of fd fixups
- *
- * Now that we are in the context of the transaction target
- * process, we can allocate and install fds. Process the
- * list of fds to translate and fixup the buffer with the
- * new fds.
- *
- * If we fail to allocate an fd, then free the resources by
- * fput'ing files that have not been processed and ksys_close'ing
- * any fds that have already been allocated.
- */
-static int binder_apply_fd_fixups(struct binder_transaction *t)
-{
-	struct binder_txn_fd_fixup *fixup, *tmp;
-	int ret = 0;
-
-	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
-		int fd = get_unused_fd_flags(O_CLOEXEC);
-		u32 *fdp;
-
-		if (fd < 0) {
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "failed fd fixup txn %d fd %d\n",
-				     t->debug_id, fd);
-			ret = -ENOMEM;
-			break;
-		}
-		binder_debug(BINDER_DEBUG_TRANSACTION,
-			     "fd fixup txn %d fd %d\n",
-			     t->debug_id, fd);
-		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
-		fd_install(fd, fixup->file);
-		fixup->file = NULL;
-		fdp = (u32 *)(t->buffer->data + fixup->offset);
-		/*
-		 * This store can cause problems for CPUs with a
-		 * VIVT cache (eg ARMv5) since the cache cannot
-		 * detect virtual aliases to the same physical cacheline.
-		 * To support VIVT, this address and the user-space VA
-		 * would both need to be flushed. Since this kernel
-		 * VA is not constructed via page_to_virt(), we can't
-		 * use flush_dcache_page() on it, so we'd have to use
-		 * an internal function. If devices with VIVT ever
-		 * need to run Android, we'll either need to go back
-		 * to patching the translated fd from the sender side
-		 * (using the non-standard kernel functions), or rework
-		 * how the kernel uses the buffer to use page_to_virt()
-		 * addresses instead of allocating in our own vm area.
-		 *
-		 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
-		 */
-		*fdp = fd;
-	}
-	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
-		if (fixup->file) {
-			fput(fixup->file);
-		} else if (ret) {
-			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
-
-			binder_deferred_fd_close(*fdp);
-		}
-		list_del(&fixup->fixup_entry);
-		kfree(fixup);
-	}
-
-	return ret;
-}
-
 static int binder_thread_read(struct binder_proc *proc,
 			      struct binder_thread *thread,
 			      binder_uintptr_t binder_buffer, size_t size,
@@ -4118,13 +3811,11 @@ static int binder_thread_read(struct binder_proc *proc,
 
 	while (1) {
 		uint32_t cmd;
-		struct binder_transaction_data_secctx tr;
-		struct binder_transaction_data *trd = &tr.transaction_data;
+		struct binder_transaction_data tr;
 		struct binder_work *w = NULL;
 		struct list_head *list = NULL;
 		struct binder_transaction *t = NULL;
 		struct binder_thread *t_from;
-		size_t trsize = sizeof(*trd);
 
 		binder_inner_proc_lock(proc);
 		if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4146,8 +3837,6 @@ static int binder_thread_read(struct binder_proc *proc,
 			break;
 		}
 		w = binder_dequeue_work_head_ilocked(list);
-		if (binder_worklist_empty_ilocked(&thread->todo))
-			thread->process_todo = false;
 
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
@@ -4162,17 +3851,14 @@ static int binder_thread_read(struct binder_proc *proc,
 			binder_inner_proc_unlock(proc);
 			if (put_user(e->cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
-			cmd = e->cmd;
 			e->cmd = BR_OK;
 			ptr += sizeof(uint32_t);
 
-			binder_stat_br(proc, thread, cmd);
+			binder_stat_br(proc, thread, e->cmd);
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
 			binder_inner_proc_unlock(proc);
 			cmd = BR_TRANSACTION_COMPLETE;
-			kfree(w);
-			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
@@ -4181,6 +3867,8 @@ static int binder_thread_read(struct binder_proc *proc,
 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
 				     proc->pid, thread->pid);
+			kfree(w);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 		} break;
 		case BINDER_WORK_NODE: {
 			struct binder_node *node = container_of(w, struct binder_node, work);
@@ -4310,11 +3998,6 @@ static int binder_thread_read(struct binder_proc *proc,
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
-		default:
-			binder_inner_proc_unlock(proc);
-			pr_err("%d:%d: bad work type %d\n",
-			       proc->pid, thread->pid, w->type);
-			break;
 		}
 
 		if (!t)
@@ -4324,8 +4007,8 @@ static int binder_thread_read(struct binder_proc *proc,
 		if (t->buffer->target_node) {
 			struct binder_node *target_node = t->buffer->target_node;
 
-			trd->target.ptr = target_node->ptr;
-			trd->cookie =  target_node->cookie;
+			tr.target.ptr = target_node->ptr;
+			tr.cookie =  target_node->cookie;
 			t->saved_priority = task_nice(current);
 			if (t->priority < target_node->min_priority &&
 			    !(t->flags & TF_ONE_WAY))
@@ -4335,67 +4018,33 @@ static int binder_thread_read(struct binder_proc *proc,
 				binder_set_nice(target_node->min_priority);
 			cmd = BR_TRANSACTION;
 		} else {
-			trd->target.ptr = 0;
-			trd->cookie = 0;
+			tr.target.ptr = 0;
+			tr.cookie = 0;
 			cmd = BR_REPLY;
 		}
-		trd->code = t->code;
-		trd->flags = t->flags;
-		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+		tr.code = t->code;
+		tr.flags = t->flags;
+		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
 
 		t_from = binder_get_txn_from(t);
 		if (t_from) {
 			struct task_struct *sender = t_from->proc->tsk;
 
-			trd->sender_pid =
-				task_tgid_nr_ns(sender,
-						task_active_pid_ns(current));
+			tr.sender_pid = task_tgid_nr_ns(sender,
+							task_active_pid_ns(current));
 		} else {
-			trd->sender_pid = 0;
+			tr.sender_pid = 0;
 		}
 
-		ret = binder_apply_fd_fixups(t);
-		if (ret) {
-			struct binder_buffer *buffer = t->buffer;
-			bool oneway = !!(t->flags & TF_ONE_WAY);
-			int tid = t->debug_id;
-
-			if (t_from)
-				binder_thread_dec_tmpref(t_from);
-			buffer->transaction = NULL;
-			binder_cleanup_transaction(t, "fd fixups failed",
-						   BR_FAILED_REPLY);
-			binder_free_buf(proc, buffer);
-			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
-				     proc->pid, thread->pid,
-				     oneway ? "async " :
-					(cmd == BR_REPLY ? "reply " : ""),
-				     tid, BR_FAILED_REPLY, ret, __LINE__);
-			if (cmd == BR_REPLY) {
-				cmd = BR_FAILED_REPLY;
-				if (put_user(cmd, (uint32_t __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(uint32_t);
-				binder_stat_br(proc, thread, cmd);
-				break;
-			}
-			continue;
-		}
-		trd->data_size = t->buffer->data_size;
-		trd->offsets_size = t->buffer->offsets_size;
-		trd->data.ptr.buffer = (binder_uintptr_t)
+		tr.data_size = t->buffer->data_size;
+		tr.offsets_size = t->buffer->offsets_size;
+		tr.data.ptr.buffer = (binder_uintptr_t)
 			((uintptr_t)t->buffer->data +
 			binder_alloc_get_user_buffer_offset(&proc->alloc));
-		trd->data.ptr.offsets = trd->data.ptr.buffer +
+		tr.data.ptr.offsets = tr.data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
-		tr.secctx = t->security_ctx;
-		if (t->security_ctx) {
-			cmd = BR_TRANSACTION_SEC_CTX;
-			trsize = sizeof(tr);
-		}
 		if (put_user(cmd, (uint32_t __user *)ptr)) {
 			if (t_from)
 				binder_thread_dec_tmpref(t_from);
@@ -4406,7 +4055,7 @@ static int binder_thread_read(struct binder_proc *proc,
 			return -EFAULT;
 		}
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, trsize)) {
+		if (copy_to_user(ptr, &tr, sizeof(tr))) {
 			if (t_from)
 				binder_thread_dec_tmpref(t_from);
 
@@ -4415,7 +4064,7 @@ static int binder_thread_read(struct binder_proc *proc,
 
 			return -EFAULT;
 		}
-		ptr += trsize;
+		ptr += sizeof(tr);
 
 		trace_binder_transaction_received(t);
 		binder_stat_br(proc, thread, cmd);
@@ -4423,18 +4072,16 @@ static int binder_thread_read(struct binder_proc *proc,
 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
 			     proc->pid, thread->pid,
 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
-				(cmd == BR_TRANSACTION_SEC_CTX) ?
-				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
+				"BR_REPLY",
 			     t->debug_id, t_from ? t_from->proc->pid : 0,
 			     t_from ? t_from->pid : 0, cmd,
 			     t->buffer->data_size, t->buffer->offsets_size,
-			     (u64)trd->data.ptr.buffer,
-			     (u64)trd->data.ptr.offsets);
+			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
 
 		if (t_from)
 			binder_thread_dec_tmpref(t_from);
 		t->buffer->allow_user_free = 1;
-		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
+		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
 			binder_inner_proc_lock(thread->proc);
 			t->to_parent = thread->transaction_stack;
 			t->to_thread = thread;
@@ -4626,8 +4273,6 @@ static int binder_thread_release(struct binder_proc *proc,
 			spin_lock(&t->lock);
 			if (t->to_thread == thread)
 				send_reply = t;
-		} else {
-			__acquire(&t->lock);
 		}
 		thread->is_dead = true;
 
@@ -4656,11 +4301,7 @@ static int binder_thread_release(struct binder_proc *proc,
 			spin_unlock(&last_t->lock);
 		if (t)
 			spin_lock(&t->lock);
-		else
-			__acquire(&t->lock);
 	}
-	/* annotation for sparse, lock not acquired in last iteration above */
-	__release(&t->lock);
 
 	/*
 	 * If this thread used poll, make sure we remove the waitqueue
@@ -4670,20 +4311,11 @@ static int binder_thread_release(struct binder_proc *proc,
 	 */
 	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
 	    waitqueue_active(&thread->wait)) {
-		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
+		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
 	}
 
 	binder_inner_proc_unlock(thread->proc);
 
-	/*
-	 * This is needed to avoid races between wake_up_poll() above and
-	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-	 * lock, so we can be sure it's done after calling synchronize_rcu().
-	 */
-	if (thread->looper & BINDER_LOOPER_STATE_POLL)
-		synchronize_rcu();
-
 	if (send_reply)
 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
 	binder_release_work(proc, &thread->todo);
@@ -4691,7 +4323,7 @@ static int binder_thread_release(struct binder_proc *proc,
 	return active_transactions;
 }
 
-static __poll_t binder_poll(struct file *filp,
+static unsigned int binder_poll(struct file *filp,
 				struct poll_table_struct *wait)
 {
 	struct binder_proc *proc = filp->private_data;
@@ -4699,8 +4331,6 @@ static unsigned int binder_poll(struct file *filp,
 	bool wait_for_proc_work;
 
 	thread = binder_get_thread(proc);
-	if (!thread)
-		return POLLERR;
 
 	binder_inner_proc_lock(thread->proc);
 	thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -4711,7 +4341,7 @@ static unsigned int binder_poll(struct file *filp,
 	poll_wait(filp, &thread->wait, wait);
 
 	if (binder_has_work(thread, wait_for_proc_work))
-		return EPOLLIN;
+		return POLLIN;
 
 	return 0;
 }
@@ -4782,8 +4412,7 @@ static int binder_ioctl_write_read(struct file *filp,
 	return ret;
 }
 
-static int binder_ioctl_set_ctx_mgr(struct file *filp,
-				    struct flat_binder_object *fbo)
+static int binder_ioctl_set_ctx_mgr(struct file *filp)
 {
 	int ret = 0;
 	struct binder_proc *proc = filp->private_data;
@@ -4812,7 +4441,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
 	} else {
 		context->binder_context_mgr_uid = curr_euid;
 	}
-	new_node = binder_new_node(proc, fbo);
+	new_node = binder_new_node(proc, NULL);
 	if (!new_node) {
 		ret = -ENOMEM;
 		goto out;
@@ -4830,42 +4459,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
 	return ret;
 }
 
-static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
-		struct binder_node_info_for_ref *info)
-{
-	struct binder_node *node;
-	struct binder_context *context = proc->context;
-	__u32 handle = info->handle;
-
-	if (info->strong_count || info->weak_count || info->reserved1 ||
-	    info->reserved2 || info->reserved3) {
-		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
-				  proc->pid);
-		return -EINVAL;
-	}
-
-	/* This ioctl may only be used by the context manager */
-	mutex_lock(&context->context_mgr_node_lock);
-	if (!context->binder_context_mgr_node ||
-	    context->binder_context_mgr_node->proc != proc) {
-		mutex_unlock(&context->context_mgr_node_lock);
-		return -EPERM;
-	}
-	mutex_unlock(&context->context_mgr_node_lock);
-
-	node = binder_get_node_from_ref(proc, handle, true, NULL);
-	if (!node)
-		return -EINVAL;
-
-	info->strong_count = node->local_strong_refs +
-		node->internal_strong_refs;
-	info->weak_count = node->local_weak_refs;
-
-	binder_put_node(node);
-
-	return 0;
-}
-
 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 				struct binder_node_debug_info *info)
 {
@@ -4935,20 +4528,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		binder_inner_proc_unlock(proc);
 		break;
 	}
-	case BINDER_SET_CONTEXT_MGR_EXT: {
-		struct flat_binder_object fbo;
-
-		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
-			ret = -EINVAL;
-			goto err;
-		}
-		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
-		if (ret)
-			goto err;
-		break;
-	}
 	case BINDER_SET_CONTEXT_MGR:
-		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
+		ret = binder_ioctl_set_ctx_mgr(filp);
 		if (ret)
 			goto err;
 		break;
@@ -4972,25 +4553,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
-	case BINDER_GET_NODE_INFO_FOR_REF: {
-		struct binder_node_info_for_ref info;
-
-		if (copy_from_user(&info, ubuf, sizeof(info))) {
-			ret = -EFAULT;
-			goto err;
-		}
-
-		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
-		if (ret < 0)
-			goto err;
-
-		if (copy_to_user(ubuf, &info, sizeof(info))) {
-			ret = -EFAULT;
-			goto err;
-		}
-
-		break;
-	}
 	case BINDER_GET_NODE_DEBUG_INFO: {
 		struct binder_node_debug_info info;
@@ -5046,9 +4608,14 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
 	binder_alloc_vma_close(&proc->alloc);
+	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
-static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static int binder_vm_fault(struct vm_fault *vmf)
+#else
+static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
 {
 	return VM_FAULT_SIGBUS;
 }
@@ -5082,19 +4649,20 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		failure_string = "bad vm_flags";
 		goto err_bad_arg;
 	}
-	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
-	vma->vm_flags &= ~VM_MAYWRITE;
-
+	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
 	if (ret)
 		return ret;
+	mutex_lock(&proc->files_lock);
+	proc->files = get_files_struct(current);
+	mutex_unlock(&proc->files_lock);
 	return 0;
 
 err_bad_arg:
-	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 	return ret;
 }
@@ -5104,7 +4672,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	struct binder_proc *proc;
 	struct binder_device *binder_dev;
 
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
 		     current->group_leader->pid, current->pid);
 
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -5114,6 +4682,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	mutex_init(&proc->files_lock);
 	INIT_LIST_HEAD(&proc->todo);
 	proc->default_priority = task_nice(current);
 	/* binderfs stashes devices in i_private */
*/ - proc->debugfs_entry = debugfs_create_file(strbuf, 0444, + proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, binder_debugfs_dir_entry_proc, (void *)(unsigned long)proc->pid, - &proc_fops); + &binder_proc_fops); } return 0; @@ -5267,6 +4836,8 @@ static void binder_deferred_release(struct binder_proc *proc) struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; + BUG_ON(proc->files); + mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); mutex_unlock(&binder_procs_lock); @@ -5348,6 +4919,7 @@ static void binder_deferred_release(struct binder_proc *proc) static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; + struct files_struct *files; int defer; @@ -5365,11 +4937,23 @@ static void binder_deferred_func(struct work_struct *work) } mutex_unlock(&binder_deferred_lock); + files = NULL; + if (defer & BINDER_DEFERRED_PUT_FILES) { + mutex_lock(&proc->files_lock); + files = proc->files; + if (files) + proc->files = NULL; + mutex_unlock(&proc->files_lock); + } + if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ + + if (files) + put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); @@ -5398,7 +4982,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, @@ -5422,7 +5006,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %pK\n", + seq_printf(m, " size %zd:%zd data %p\n", buffer->data_size, buffer->offsets_size, buffer->data); } @@ -5573,9 +5157,6 @@ static void print_binder_proc(struct seq_file *m, for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); - if (!print_all && !node->has_async_transaction) - continue; - /* * take a temporary reference on the node so it * survives and isn't removed from the tree @@ -5780,7 +5361,7 @@ static void print_binder_proc_stats(struct seq_file *m, } -static int state_show(struct seq_file *m, void *unused) +static int binder_state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; @@ -5819,7 +5400,7 @@ static int state_show(struct seq_file *m, void *unused) return 0; } -static int stats_show(struct seq_file *m, void *unused) +static int binder_stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -5835,7 +5416,7 @@ static int stats_show(struct seq_file *m, void *unused) return 0; } -static int transactions_show(struct seq_file *m, void *unused) +static int binder_transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -5848,7 +5429,7 @@ static int transactions_show(struct seq_file *m, void *unused) return 0; } -static int proc_show(struct seq_file *m, void *unused) +static int binder_proc_show(struct seq_file *m, void *unused) { struct binder_proc *itr; int pid = (unsigned long)m->private; @@ -5891,7 +5472,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m, "\n" : " (incomplete)\n"); } -static int transaction_log_show(struct 
seq_file *m, void *unused) +static int binder_transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; unsigned int log_cur = atomic_read(&log->cur); @@ -5923,10 +5504,12 @@ const struct file_operations binder_fops = { .release = binder_release, }; -DEFINE_SHOW_ATTRIBUTE(state); -DEFINE_SHOW_ATTRIBUTE(stats); -DEFINE_SHOW_ATTRIBUTE(transactions); -DEFINE_SHOW_ATTRIBUTE(transaction_log); +EXPORT_SYMBOL(binder_fops); + +BINDER_DEBUG_ENTRY(state); +BINDER_DEBUG_ENTRY(stats); +BINDER_DEBUG_ENTRY(transactions); +BINDER_DEBUG_ENTRY(transaction_log); static int __init init_binder_device(const char *name) { @@ -5959,14 +5542,11 @@ static int __init init_binder_device(const char *name) static int __init binder_init(void) { int ret; - char *device_name, *device_tmp; + char *device_name, *device_names, *device_tmp; struct binder_device *device; struct hlist_node *tmp; - char *device_names = NULL; - ret = binder_alloc_shrinker_init(); - if (ret) - return ret; + binder_alloc_shrinker_init(); atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); @@ -5978,54 +5558,49 @@ static int __init binder_init(void) if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, NULL, - &state_fops); + &binder_state_fops); debugfs_create_file("stats", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, NULL, - &stats_fops); + &binder_stats_fops); debugfs_create_file("transactions", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, NULL, - &transactions_fops); + &binder_transactions_fops); debugfs_create_file("transaction_log", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log, - &transaction_log_fops); + &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", - 0444, + S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, - &transaction_log_fops); + &binder_transaction_log_fops); } - if (strcmp(binder_devices_param, "") != 0) { - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. - */ - device_names = kstrdup(binder_devices_param, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } - - device_tmp = device_names; - while ((device_name = strsep(&device_tmp, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; - } + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; } + strcpy(device_names, binder_devices_param); - ret = init_binderfs(); - if (ret) - goto err_init_binder_device_failed; + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } return ret; @@ -6044,20 +5619,25 @@ static int __init binder_init(void) return ret; } -module_init(binder_init); -/* - * binder will have no exit function since binderfs instances can be mounted - * multiple times and also in user namespaces finding and destroying them all - * is not feasible without introducing insane locking. Just ignoring existing - * instances on module unload also wouldn't work since we would loose track of - * what major numer was dynamically allocated and also what minor numbers are - * already given out. 
So this would get us into all kinds of issues with device - * number reuse. So simply don't allow unloading unless we are forced to do so. - */ +static void __exit binder_exit(void) +{ + struct binder_device *device; + struct hlist_node *tmp; -MODULE_AUTHOR("Google, Inc."); -MODULE_DESCRIPTION("Driver for Android binder device"); -MODULE_LICENSE("GPL v2"); + hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { + misc_deregister(&device->miscdev); + kfree(device->context.name); + hlist_del(&device->hlist); + kfree(device); + } + + debugfs_remove_recursive(binder_debugfs_dir_entry_root); +} + +module_init(binder_init); +module_exit(binder_exit); #define CREATE_TRACE_POINTS #include "binder_trace.h" + +MODULE_LICENSE("GPL v2"); diff --git a/binder/binder.h b/binder/binder.h deleted file mode 100644 index 35aac58..0000000 --- a/binder/binder.h +++ /dev/null @@ -1,504 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2008 Google, Inc. - * - * Based on, but no longer compatible with, the original - * OpenBinder.org binder driver interface, which is: - * - * Copyright (c) 2005 Palmsource, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef _UAPI_LINUX_BINDER_H -#define _UAPI_LINUX_BINDER_H - -#include -#include - -#define B_PACK_CHARS(c1, c2, c3, c4) \ - ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) -#define B_TYPE_LARGE 0x85 - -#ifndef EPOLLHUP -#define EPOLLHUP POLLHUP -#endif - -#ifndef EPOLLIN -#define EPOLLIN POLLIN -#endif - -enum { - BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), - BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), - BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), - BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), - BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), - BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE), - BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), -}; - -enum { - FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, - FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, - - /** - * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts - * - * Only when set, causes senders to include their security - * context - */ - FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000, -}; - -#ifdef BINDER_IPC_32BIT -typedef __u32 binder_size_t; -typedef __u32 binder_uintptr_t; -#else -typedef __u64 binder_size_t; -typedef __u64 binder_uintptr_t; -#endif - -/** - * struct binder_object_header - header shared by all binder metadata objects. - * @type: type of the object - */ -struct binder_object_header { - __u32 type; -}; - -/* - * This is the flattened representation of a Binder object for transfer - * between processes. The 'offsets' supplied as part of a binder transaction - * contains offsets into the data where these structures occur. The Binder - * driver takes care of re-writing the structure type and data as it moves - * between processes. - */ -struct flat_binder_object { - struct binder_object_header hdr; - __u32 flags; - - /* 8 bytes of data. 
*/ - union { - binder_uintptr_t binder; /* local object */ - __u32 handle; /* remote object */ - }; - - /* extra data associated with local object */ - binder_uintptr_t cookie; -}; - -/** - * struct binder_fd_object - describes a filedescriptor to be fixed up. - * @hdr: common header structure - * @pad_flags: padding to remain compatible with old userspace code - * @pad_binder: padding to remain compatible with old userspace code - * @fd: file descriptor - * @cookie: opaque data, used by user-space - */ -struct binder_fd_object { - struct binder_object_header hdr; - __u32 pad_flags; - union { - binder_uintptr_t pad_binder; - __u32 fd; - }; - - binder_uintptr_t cookie; -}; - -/* struct binder_buffer_object - object describing a userspace buffer - * @hdr: common header structure - * @flags: one or more BINDER_BUFFER_* flags - * @buffer: address of the buffer - * @length: length of the buffer - * @parent: index in offset array pointing to parent buffer - * @parent_offset: offset in @parent pointing to this buffer - * - * A binder_buffer object represents an object that the - * binder kernel driver can copy verbatim to the target - * address space. A buffer itself may be pointed to from - * within another buffer, meaning that the pointer inside - * that other buffer needs to be fixed up as well. This - * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT - * flag in @flags, by setting @parent buffer to the index - * in the offset array pointing to the parent binder_buffer_object, - * and by setting @parent_offset to the offset in the parent buffer - * at which the pointer to this buffer is located. - */ -struct binder_buffer_object { - struct binder_object_header hdr; - __u32 flags; - binder_uintptr_t buffer; - binder_size_t length; - binder_size_t parent; - binder_size_t parent_offset; -}; - -enum { - BINDER_BUFFER_FLAG_HAS_PARENT = 0x01, -}; - -/* struct binder_fd_array_object - object describing an array of fds in a buffer - * @hdr: common header structure - * @pad: padding to ensure correct alignment - * @num_fds: number of file descriptors in the buffer - * @parent: index in offset array to buffer holding the fd array - * @parent_offset: start offset of fd array in the buffer - * - * A binder_fd_array object represents an array of file - * descriptors embedded in a binder_buffer_object. It is - * different from a regular binder_buffer_object because it - * describes a list of file descriptors to fix up, not an opaque - * blob of memory, and hence the kernel needs to treat it differently. - * - * An example of how this would be used is with Android's - * native_handle_t object, which is a struct with a list of integers - * and a list of file descriptors. The native_handle_t struct itself - * will be represented by a struct binder_buffer_objct, whereas the - * embedded list of file descriptors is represented by a - * struct binder_fd_array_object with that binder_buffer_object as - * a parent. - */ -struct binder_fd_array_object { - struct binder_object_header hdr; - __u32 pad; - binder_size_t num_fds; - binder_size_t parent; - binder_size_t parent_offset; -}; - -/* - * On 64-bit platforms where user code may run in 32-bits the driver must - * translate the buffer (and local binder) addresses appropriately. 
- */ - -struct binder_write_read { - binder_size_t write_size; /* bytes to write */ - binder_size_t write_consumed; /* bytes consumed by driver */ - binder_uintptr_t write_buffer; - binder_size_t read_size; /* bytes to read */ - binder_size_t read_consumed; /* bytes consumed by driver */ - binder_uintptr_t read_buffer; -}; - -/* Use with BINDER_VERSION, driver fills in fields. */ -struct binder_version { - /* driver protocol version -- increment with incompatible change */ - __s32 protocol_version; -}; - -/* This is the current protocol version. */ -#ifdef BINDER_IPC_32BIT -#define BINDER_CURRENT_PROTOCOL_VERSION 7 -#else -#define BINDER_CURRENT_PROTOCOL_VERSION 8 -#endif - -/* - * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields. - * Set ptr to NULL for the first call to get the info for the first node, and - * then repeat the call passing the previously returned value to get the next - * nodes. ptr will be 0 when there are no more nodes. - */ -struct binder_node_debug_info { - binder_uintptr_t ptr; - binder_uintptr_t cookie; - __u32 has_strong_ref; - __u32 has_weak_ref; -}; - -struct binder_node_info_for_ref { - __u32 handle; - __u32 strong_count; - __u32 weak_count; - __u32 reserved1; - __u32 reserved2; - __u32 reserved3; -}; - -#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) -#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) -#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) -#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) -#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) -#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) -#define BINDER_VERSION _IOWR('b', 9, struct binder_version) -#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) -#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) -#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) - -/* - * NOTE: Two special error codes you should check for when calling - * in to the driver are: - * - * EINTR -- The operation has been interupted. This should be - * handled by retrying the ioctl() until a different error code - * is returned. - * - * ECONNREFUSED -- The driver is no longer accepting operations - * from your process. That is, the process is being destroyed. - * You should handle this by exiting from your process. Note - * that once this error code is returned, all further calls to - * the driver from any thread will return this same code. - */ - -enum transaction_flags { - TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ - TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ - TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ - TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ -}; - -struct binder_transaction_data { - /* The first two are only used for bcTRANSACTION and brTRANSACTION, - * identifying the target and contents of the transaction. - */ - union { - /* target descriptor of command transaction */ - __u32 handle; - /* target descriptor of return transaction */ - binder_uintptr_t ptr; - } target; - binder_uintptr_t cookie; /* target object cookie */ - __u32 code; /* transaction command */ - - /* General information about the transaction. 
*/ - __u32 flags; - pid_t sender_pid; - uid_t sender_euid; - binder_size_t data_size; /* number of bytes of data */ - binder_size_t offsets_size; /* number of bytes of offsets */ - - /* If this transaction is inline, the data immediately - * follows here; otherwise, it ends with a pointer to - * the data buffer. - */ - union { - struct { - /* transaction data */ - binder_uintptr_t buffer; - /* offsets from buffer to flat_binder_object structs */ - binder_uintptr_t offsets; - } ptr; - __u8 buf[8]; - } data; -}; - -struct binder_transaction_data_secctx { - struct binder_transaction_data transaction_data; - binder_uintptr_t secctx; -}; - -struct binder_transaction_data_sg { - struct binder_transaction_data transaction_data; - binder_size_t buffers_size; -}; - -struct binder_ptr_cookie { - binder_uintptr_t ptr; - binder_uintptr_t cookie; -}; - -struct binder_handle_cookie { - __u32 handle; - binder_uintptr_t cookie; -} __packed; - -struct binder_pri_desc { - __s32 priority; - __u32 desc; -}; - -struct binder_pri_ptr_cookie { - __s32 priority; - binder_uintptr_t ptr; - binder_uintptr_t cookie; -}; - -enum binder_driver_return_protocol { - BR_ERROR = _IOR('r', 0, __s32), - /* - * int: error code - */ - - BR_OK = _IO('r', 1), - /* No parameters! */ - - BR_TRANSACTION_SEC_CTX = _IOR('r', 2, - struct binder_transaction_data_secctx), - /* - * binder_transaction_data_secctx: the received command. - */ - BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), - BR_REPLY = _IOR('r', 3, struct binder_transaction_data), - /* - * binder_transaction_data: the received command. - */ - - BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), - /* - * not currently supported - * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. - * Else the remote object has acquired a primary reference. - */ - - BR_DEAD_REPLY = _IO('r', 5), - /* - * The target of the last transaction (either a bcTRANSACTION or - * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. - */ - - BR_TRANSACTION_COMPLETE = _IO('r', 6), - /* - * No parameters... always refers to the last transaction requested - * (including replies). Note that this will be sent even for - * asynchronous transactions. - */ - - BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), - BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), - BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), - BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie for binder - */ - - BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), - /* - * not currently supported - * int: priority - * void *: ptr to binder - * void *: cookie for binder - */ - - BR_NOOP = _IO('r', 12), - /* - * No parameters. Do nothing and examine the next command. It exists - * primarily so that we can replace it with a BR_SPAWN_LOOPER command. - */ - - BR_SPAWN_LOOPER = _IO('r', 13), - /* - * No parameters. The driver has determined that a process has no - * threads waiting to service incoming transactions. When a process - * receives this command, it must spawn a new service thread and - * register it via bcENTER_LOOPER. - */ - - BR_FINISHED = _IO('r', 14), - /* - * not currently supported - * stop threadpool thread - */ - - BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), - /* - * void *: cookie - */ - BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), - /* - * void *: cookie - */ - - BR_FAILED_REPLY = _IO('r', 17), - /* - * The the last transaction (either a bcTRANSACTION or - * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). 
No parameters. - */ -}; - -enum binder_driver_command_protocol { - BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), - BC_REPLY = _IOW('c', 1, struct binder_transaction_data), - /* - * binder_transaction_data: the sent command. - */ - - BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), - /* - * not currently supported - * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. - * Else you have acquired a primary reference on the object. - */ - - BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), - /* - * void *: ptr to transaction data received on a read - */ - - BC_INCREFS = _IOW('c', 4, __u32), - BC_ACQUIRE = _IOW('c', 5, __u32), - BC_RELEASE = _IOW('c', 6, __u32), - BC_DECREFS = _IOW('c', 7, __u32), - /* - * int: descriptor - */ - - BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), - BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), - /* - * void *: ptr to binder - * void *: cookie for binder - */ - - BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), - /* - * not currently supported - * int: priority - * int: descriptor - */ - - BC_REGISTER_LOOPER = _IO('c', 11), - /* - * No parameters. - * Register a spawned looper thread with the device. - */ - - BC_ENTER_LOOPER = _IO('c', 12), - BC_EXIT_LOOPER = _IO('c', 13), - /* - * No parameters. - * These two commands are sent as an application-level thread - * enters and exits the binder loop, respectively. They are - * used so the binder can have an accurate count of the number - * of looping threads it has available. - */ - - BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, - struct binder_handle_cookie), - /* - * int: handle - * void *: cookie - */ - - BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, - struct binder_handle_cookie), - /* - * int: handle - * void *: cookie - */ - - BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), - /* - * void *: cookie - */ - - BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg), - BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg), - /* - * binder_transaction_data_sg: the sent command. - */ -}; - -#endif /* _UAPI_LINUX_BINDER_H */ - diff --git a/binder/binder_alloc.c b/binder/binder_alloc.c index c4d8b78..5d27553 100644 --- a/binder/binder_alloc.c +++ b/binder/binder_alloc.c @@ -17,8 +17,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include -#include +#include #include #include #include @@ -27,29 +28,32 @@ #include #include #include -#include -#include +#include #include "binder_alloc.h" #include "binder_trace.h" +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif + struct list_lru binder_alloc_lru; static DEFINE_MUTEX(binder_alloc_mmap_lock); enum { - BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_OPEN_CLOSE = 1U << 1, BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, }; -static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR; +static uint32_t binder_alloc_debug_mask; -module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644); +module_param_named(debug_mask, binder_alloc_debug_mask, + uint, 0644); #define binder_alloc_debug(mask, x...) \ do { \ if (binder_alloc_debug_mask & mask) \ - pr_info_ratelimited(x); \ + pr_info(x); \ } while (0) static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) @@ -150,12 +154,14 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( else { /* * Guard against user threads attempting to - * free the buffer when in use by kernel or - * after it's already been freed. 
+ * free the buffer twice */ - if (!buffer->allow_user_free) - return ERR_PTR(-EPERM); - buffer->allow_user_free = 0; + if (buffer->free_in_progress) { + pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", + alloc->pid, current->pid, (u64)user_ptr); + return NULL; + } + buffer->free_in_progress = 1; return buffer; } } @@ -185,12 +191,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end) + void *start, void *end, + struct vm_area_struct *vma) { void *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; - struct vm_area_struct *vma = NULL; struct mm_struct *mm = NULL; bool need_mm = false; @@ -214,18 +220,17 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } } - if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) + if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) mm = alloc->vma_vm_mm; if (mm) { - down_read(&mm->mmap_sem); + down_write(&mm->mmap_sem); vma = alloc->vma; } if (!vma && need_mm) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - alloc->pid); + pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); goto err_no_vma; } @@ -281,14 +286,11 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, goto err_vm_insert_page_failed; } - if (index + 1 > alloc->pages_high) - alloc->pages_high = index + 1; - trace_binder_alloc_page_end(alloc, index); /* vm_insert_page does not seem to increment the refcount */ } if (mm) { - up_read(&mm->mmap_sem); + up_write(&mm->mmap_sem); mmput(mm); } return 0; @@ -321,47 +323,17 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } err_no_vma: if (mm) { - up_read(&mm->mmap_sem); + up_write(&mm->mmap_sem); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } - -static inline void binder_alloc_set_vma(struct binder_alloc *alloc, - struct vm_area_struct *vma) -{ - if (vma) - alloc->vma_vm_mm = vma->vm_mm; - /* - * If we see alloc->vma is not NULL, buffer data structures set up - * completely. Look at smp_rmb side binder_alloc_get_vma. - * We also want to guarantee new alloc->vma_vm_mm is always visible - * if alloc->vma is set. 
- */ - smp_wmb(); - alloc->vma = vma; -} - -static inline struct vm_area_struct *binder_alloc_get_vma( - struct binder_alloc *alloc) -{ - struct vm_area_struct *vma = NULL; - - if (alloc->vma) { - /* Look at description in binder_alloc_set_vma */ - smp_rmb(); - vma = alloc->vma; - } - return vma; -} - -static struct binder_buffer *binder_alloc_new_buf_locked( - struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) +struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -372,10 +344,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked( size_t size, data_offsets_size; int ret; - if (!binder_alloc_get_vma(alloc)) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf, no vma\n", - alloc->pid); + if (alloc->vma == NULL) { + pr_err("%d: binder_alloc_buf, no vma\n", + alloc->pid); return ERR_PTR(-ESRCH); } @@ -447,14 +418,11 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (buffer_size > largest_free_size) largest_free_size = buffer_size; } - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf size %zd failed, no address space\n", - alloc->pid, size); - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", - total_alloc_size, allocated_buffers, - largest_alloc_size, total_free_size, - free_buffers, largest_free_size); + pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", + alloc->pid, size); + pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, largest_alloc_size, + total_free_size, free_buffers, largest_free_size); return ERR_PTR(-ENOSPC); } if (n == NULL) { @@ -474,7 +442,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); if (ret) return ERR_PTR(ret); @@ -495,7 +463,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; - buffer->allow_user_free = 0; + buffer->free_in_progress = 0; binder_insert_allocated_buffer_locked(alloc, buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %pK\n", @@ -515,7 +483,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( err_alloc_buf_struct_failed: binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - end_page_addr); + end_page_addr, NULL); return ERR_PTR(-ENOMEM); } @@ -599,7 +567,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, alloc->pid, buffer->data, prev->data, next ? 
next->data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE); + buffer_start_page(buffer) + PAGE_SIZE, + NULL); } list_del(&buffer->entry); kfree(buffer); @@ -636,7 +605,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), + NULL); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -725,8 +695,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, } } #endif - alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, - sizeof(alloc->pages[0]), + alloc->pages = kzalloc(sizeof(alloc->pages[0]) * + ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); if (alloc->pages == NULL) { ret = -ENOMEM; @@ -747,7 +717,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; - binder_alloc_set_vma(alloc, vma); + barrier(); + alloc->vma = vma; + alloc->vma_vm_mm = vma->vm_mm; mmgrab(alloc->vma_vm_mm); return 0; @@ -762,10 +734,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%s: %d %lx-%lx %s failed %d\n", __func__, - alloc->pid, vma->vm_start, vma->vm_end, - failure_string, ret); + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -776,10 +746,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int buffers, page_count; struct binder_buffer *buffer; - buffers = 0; - mutex_lock(&alloc->mutex); BUG_ON(alloc->vma); + buffers = 0; + mutex_lock(&alloc->mutex); while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -890,7 +860,6 @@ void binder_alloc_print_pages(struct seq_file *m, } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); - seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } /** @@ -922,7 +891,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) */ void binder_alloc_vma_close(struct binder_alloc *alloc) { - binder_alloc_set_vma(alloc, NULL); + WRITE_ONCE(alloc->vma, NULL); } /** @@ -938,7 +907,6 @@ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg) - __must_hold(lock) { struct mm_struct *mm = NULL; struct binder_lru_page *page = container_of(item, @@ -958,13 +926,14 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; - - mm = alloc->vma_vm_mm; - if (!mmget_not_zero(mm)) - goto err_mmget; - if (!down_write_trylock(&mm->mmap_sem)) - goto err_down_write_mmap_sem_failed; - vma = binder_alloc_get_vma(alloc); + vma = alloc->vma; + if (vma) { + if (!mmget_not_zero(alloc->vma_vm_mm)) + goto err_mmget; + mm = alloc->vma_vm_mm; + if (!down_write_trylock(&mm->mmap_sem)) + goto err_down_write_mmap_sem_failed; + } list_lru_isolate(lru, item); spin_unlock(lock); @@ -977,9 +946,10 @@ enum lru_status binder_alloc_free_page(struct list_head *item, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); + + up_write(&mm->mmap_sem); + mmput(mm); } - 
up_write(&mm->mmap_sem);
-	mmput(mm);

 	trace_binder_unmap_kernel_start(alloc, index);
@@ -1019,7 +989,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	return ret;
 }

-static struct shrinker binder_shrinker = {
+struct shrinker binder_shrinker = {
 	.count_objects = binder_shrink_count,
 	.scan_objects = binder_shrink_scan,
 	.seeks = DEFAULT_SEEKS,
@@ -1039,14 +1009,8 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	INIT_LIST_HEAD(&alloc->buffers);
 }

-int binder_alloc_shrinker_init(void)
+void binder_alloc_shrinker_init(void)
 {
-	int ret = list_lru_init(&binder_alloc_lru);
-
-	if (ret == 0) {
-		ret = register_shrinker(&binder_shrinker);
-		if (ret)
-			list_lru_destroy(&binder_alloc_lru);
-	}
-	return ret;
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
 }
diff --git a/binder/binder_alloc.h b/binder/binder_alloc.h
index 366c833..2dd33b6 100644
--- a/binder/binder_alloc.h
+++ b/binder/binder_alloc.h
@@ -15,7 +15,6 @@
 #ifndef _LINUX_BINDER_ALLOC_H
 #define _LINUX_BINDER_ALLOC_H
-#include
 #include
 #include
 #include
@@ -31,16 +30,17 @@ struct binder_transaction;
 * struct binder_buffer - buffer used for binder transactions
 * @entry: entry alloc->buffers
 * @rb_node: node for allocated_buffers/free_buffers rb trees
- * @free: %true if buffer is free
- * @allow_user_free: %true if user is allowed to free buffer
- * @async_transaction: %true if buffer is in use for an async txn
- * @debug_id: unique ID for debugging
- * @transaction: pointer to associated struct binder_transaction
- * @target_node: struct binder_node associated with this buffer
- * @data_size: size of @transaction data
- * @offsets_size: size of array of offsets
- * @extra_buffers_size: size of space for other objects (like sg lists)
- * @data: pointer to base of buffer space
+ * @free: true if buffer is free
+ * @allow_user_free: true if user is allowed to free buffer
+ * @async_transaction: true if buffer is in use for an async txn
+ * @free_in_progress: true if a user-requested free of this buffer is underway
+ * @debug_id: unique ID for debugging
+ * @transaction: pointer to associated struct binder_transaction
+ * @target_node: struct binder_node associated with this buffer
+ * @data_size: size of @transaction data
+ * @offsets_size: size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data: pointer to base of buffer space
 *
 * Bookkeeping structure for binder transaction buffers
 */
@@ -51,7 +50,8 @@ struct binder_buffer {
 	unsigned free:1;
 	unsigned allow_user_free:1;
 	unsigned async_transaction:1;
-	unsigned debug_id:29;
+	unsigned free_in_progress:1;
+	unsigned debug_id:28;

 	struct binder_transaction *transaction;

@@ -92,7 +92,6 @@ struct binder_lru_page {
 * @pages: array of binder_lru_page
 * @buffer_size: size of address space specified via mmap
 * @pid: pid for associated binder_proc (invariant after init)
- * @pages_high: high watermark of offset in @pages
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. 
It is normally initialized during binder_init() and binder_mmap() @@ -113,10 +112,9 @@ struct binder_alloc { size_t buffer_size; uint32_t buffer_free; int pid; - size_t pages_high; }; -#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST) +#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST void binder_selftest_alloc(struct binder_alloc *alloc); #else static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} @@ -130,7 +128,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t extra_buffers_size, int is_async); extern void binder_alloc_init(struct binder_alloc *alloc); -extern int binder_alloc_shrinker_init(void); +void binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/binder/binderfs.h b/binder/binder_ctl.h similarity index 77% rename from binder/binderfs.h rename to binder/binder_ctl.h index 29edbad..65b2efd 100644 --- a/binder/binderfs.h +++ b/binder/binder_ctl.h @@ -4,15 +4,15 @@ * */ -#ifndef _UAPI_LINUX_BINDERFS_H -#define _UAPI_LINUX_BINDERFS_H +#ifndef _UAPI_LINUX_BINDER_CTL_H +#define _UAPI_LINUX_BINDER_CTL_H +#include #include #include -#include "binder.h" #define BINDERFS_MAX_NAME 255 -#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 + /** * struct binderfs_device - retrieve information about a new binder device * @name: the name to use for the new binderfs binder device @@ -22,8 +22,8 @@ */ struct binderfs_device { char name[BINDERFS_MAX_NAME + 1]; - __u32 major; - __u32 minor; + __u8 major; + __u8 minor; }; /** @@ -31,5 +31,5 @@ struct binderfs_device { */ #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) -#endif /* _UAPI_LINUX_BINDERFS_H */ +#endif /* _UAPI_LINUX_BINDER_CTL_H */ diff --git a/binder/binder_internal.h b/binder/binder_internal.h index 759bcec..161c993 100644 --- a/binder/binder_internal.h +++ b/binder/binder_internal.h @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -38,22 +37,13 @@ struct binder_device { extern const struct file_operations binder_fops; -#if IS_ENABLED(CONFIG_ANDROID_BINDERFS) +//#ifdef CONFIG_ANDROID_BINDERFS extern bool is_binderfs_device(const struct inode *inode); -#else -static inline bool is_binderfs_device(const struct inode *inode) -{ - return false; -} -#endif - -#if IS_ENABLED(CONFIG_ANDROID_BINDERFS) -extern int __init init_binderfs(void); -#else -static inline int __init init_binderfs(void) -{ - return 0; -} -#endif +//#else +//static inline bool is_binderfs_device(const struct inode *inode) +//{ +// return false; +//} +//#endif #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/binder/binder_trace.h b/binder/binder_trace.h index 14de7ac..76e3b9c 100644 --- a/binder/binder_trace.h +++ b/binder/binder_trace.h @@ -223,40 +223,22 @@ TRACE_EVENT(binder_transaction_ref_to_ref, __entry->dest_ref_debug_id, __entry->dest_ref_desc) ); -TRACE_EVENT(binder_transaction_fd_send, - TP_PROTO(struct binder_transaction *t, int fd, size_t offset), - TP_ARGS(t, fd, offset), +TRACE_EVENT(binder_transaction_fd, + TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), + TP_ARGS(t, src_fd, dest_fd), TP_STRUCT__entry( __field(int, debug_id) - __field(int, fd) - __field(size_t, offset) - ), - TP_fast_assign( - __entry->debug_id = t->debug_id; - __entry->fd = fd; - __entry->offset = offset; - ), - TP_printk("transaction=%d src_fd=%d offset=%zu", - __entry->debug_id, __entry->fd, __entry->offset) -); - -TRACE_EVENT(binder_transaction_fd_recv, 
- TP_PROTO(struct binder_transaction *t, int fd, size_t offset), - TP_ARGS(t, fd, offset), - - TP_STRUCT__entry( - __field(int, debug_id) - __field(int, fd) - __field(size_t, offset) + __field(int, src_fd) + __field(int, dest_fd) ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->fd = fd; - __entry->offset = offset; + __entry->src_fd = src_fd; + __entry->dest_fd = dest_fd; ), - TP_printk("transaction=%d dest_fd=%d offset=%zu", - __entry->debug_id, __entry->fd, __entry->offset) + TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", + __entry->debug_id, __entry->src_fd, __entry->dest_fd) ); DECLARE_EVENT_CLASS(binder_buffer_class, @@ -266,17 +248,14 @@ DECLARE_EVENT_CLASS(binder_buffer_class, __field(int, debug_id) __field(size_t, data_size) __field(size_t, offsets_size) - __field(size_t, extra_buffers_size) ), TP_fast_assign( __entry->debug_id = buf->debug_id; __entry->data_size = buf->data_size; __entry->offsets_size = buf->offsets_size; - __entry->extra_buffers_size = buf->extra_buffers_size; ), - TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd", - __entry->debug_id, __entry->data_size, __entry->offsets_size, - __entry->extra_buffers_size) + TP_printk("transaction=%d data_size=%zd offsets_size=%zd", + __entry->debug_id, __entry->data_size, __entry->offsets_size) ); DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, diff --git a/binder/binderfs.c b/binder/binderfs.c index dc3e4f5..5199a35 100644 --- a/binder/binderfs.c +++ b/binder/binderfs.c @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include #include #include #include @@ -11,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -21,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -29,43 +26,27 @@ #include #include #include +#include #include +#include +#include "binder_ctl.h" #include "binder_internal.h" -#include "binder.h" -#include "binderfs.h" -#include "deps.h" #define FIRST_INODE 1 #define SECOND_INODE 2 #define INODE_OFFSET 3 #define INTSTRLEN 21 #define BINDERFS_MAX_MINOR (1U << MINORBITS) -/* Ensure that the initial ipc namespace always has devices available. */ -#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) + +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 + +static struct vfsmount *binderfs_mnt; static dev_t binderfs_dev; static DEFINE_MUTEX(binderfs_minors_mutex); static DEFINE_IDA(binderfs_minors); -/** - * binderfs_mount_opts - mount options for binderfs - * @max: maximum number of allocatable binderfs binder devices - */ -struct binderfs_mount_opts { - int max; -}; - -enum { - Opt_max, - Opt_err -}; - -static const match_table_t tokens = { - { Opt_max, "max=%d" }, - { Opt_err, NULL } -}; - /** * binderfs_info - information about a binderfs mount * @ipc_ns: The ipc namespace the binderfs mount belongs to. @@ -75,16 +56,13 @@ static const match_table_t tokens = { * created. * @root_gid: gid that needs to be used when a new binder device is * created. - * @mount_opts: The mount options in use. - * @device_count: The current number of allocated binder devices. 
*/
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
-	struct binderfs_mount_opts mount_opts;
-	int device_count;
+
};

static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -92,6 +70,7 @@
 	return inode->i_sb->s_fs_info;
 }

+/*
 bool is_binderfs_device(const struct inode *inode)
 {
 	if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC)
@@ -99,7 +78,7 @@
 	return false;
 }
-
+*/

/**
 * binderfs_binder_device_create - allocate inode from super block of a
 * binderfs mount
 * @userp: buffer to copy information about new device for userspace to
 * @req: struct binderfs_device as copied from userspace
 *
 * This function allocates a new binder_device and reserves a new minor
 * number for it.
 * Minor numbers are limited and tracked globally in binderfs_minors. The
 * function will stash a struct binder_device for the specific binder
 *
@@ -123,34 +102,21 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 					 struct binderfs_device *req)
 {
 	int minor, ret;
-	struct dentry *dentry, *root;
+	struct dentry *dentry, *dup, *root;
 	struct binder_device *device;
+	size_t name_len = BINDERFS_MAX_NAME + 1;
 	char *name = NULL;
-	size_t name_len;
 	struct inode *inode = NULL;
 	struct super_block *sb = ref_inode->i_sb;
 	struct binderfs_info *info = sb->s_fs_info;
-#if defined(CONFIG_IPC_NS)
-	bool use_reserve = (info->ipc_ns == show_init_ipc_ns_compat());
-#else
-	bool use_reserve = true;
-#endif

 	/* Reserve new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
-	if (++info->device_count <= info->mount_opts.max)
-		minor = ida_alloc_max_compat(&binderfs_minors,
-					     use_reserve ? 
BINDERFS_MAX_MINOR : - BINDERFS_MAX_MINOR_CAPPED, - GFP_KERNEL); - else - minor = -ENOSPC; - if (minor < 0) { - --info->device_count; - mutex_unlock(&binderfs_minors_mutex); - return minor; - } + //minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); + if (minor < 0) + return minor; ret = -ENOMEM; device = kzalloc(sizeof(*device), GFP_KERNEL); @@ -169,13 +135,12 @@ static int binderfs_binder_device_create(struct inode *ref_inode, inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; - req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ - name_len = strlen(req->name); - /* Make sure to include terminating NUL byte */ - name = kmemdup(req->name, name_len + 1, GFP_KERNEL); + name = kmalloc(name_len, GFP_KERNEL); if (!name) goto err; + strscpy(name, req->name, name_len); + device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -194,25 +159,28 @@ static int binderfs_binder_device_create(struct inode *ref_inode, root = sb->s_root; inode_lock(d_inode(root)); - - /* look it up */ - dentry = lookup_one_len(name, root, name_len); - if (IS_ERR(dentry)) { + dentry = d_alloc_name(root, name); + if (!dentry) { inode_unlock(d_inode(root)); - ret = PTR_ERR(dentry); + ret = -ENOMEM; goto err; } - if (d_really_is_positive(dentry)) { - /* already exists */ - dput(dentry); - inode_unlock(d_inode(root)); - ret = -EEXIST; - goto err; + /* Verify that the name userspace gave us is not already in use. */ + dup = d_lookup(root, &dentry->d_name); + if (dup) { + if (d_really_is_positive(dup)) { + dput(dup); + dput(dentry); + inode_unlock(d_inode(root)); + ret = -EEXIST; + goto err; + } + dput(dup); } inode->i_private = device; - d_instantiate(dentry, inode); + d_add(dentry, inode); fsnotify_create(root->d_inode, dentry); inode_unlock(d_inode(root)); @@ -222,8 +190,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode, kfree(name); kfree(device); mutex_lock(&binderfs_minors_mutex); - --info->device_count; - ida_free_compat(&binderfs_minors, minor); + //ida_free(&binderfs_minors, minor); + ida_simple_remove(&binderfs_minors, minor); mutex_unlock(&binderfs_minors_mutex); iput(inode); @@ -268,7 +236,6 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, static void binderfs_evict_inode(struct inode *inode) { struct binder_device *device = inode->i_private; - struct binderfs_info *info = BINDERFS_I(inode); clear_inode(inode); @@ -276,95 +243,51 @@ static void binderfs_evict_inode(struct inode *inode) return; mutex_lock(&binderfs_minors_mutex); - --info->device_count; - ida_free_compat(&binderfs_minors, device->miscdev.minor); + //ida_free(&binderfs_minors, device->miscdev.minor); + ida_simple_remove(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); kfree(device->context.name); kfree(device); } -/** - * binderfs_parse_mount_opts - parse binderfs mount options - * @data: options to set (can be NULL in which case defaults are used) - */ -static int binderfs_parse_mount_opts(char *data, - struct binderfs_mount_opts *opts) -{ - char *p; - opts->max = BINDERFS_MAX_MINOR; - - while ((p = strsep(&data, ",")) != NULL) { - substring_t args[MAX_OPT_ARGS]; - int token; - int max_devices; - - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_max: - if (match_int(&args[0], &max_devices) || - (max_devices < 0 || - 
(max_devices > BINDERFS_MAX_MINOR))) - return -EINVAL; - - opts->max = max_devices; - break; - default: - pr_err("Invalid mount options\n"); - return -EINVAL; - } - } - - return 0; -} - -static int binderfs_remount(struct super_block *sb, int *flags, char *data) -{ - struct binderfs_info *info = sb->s_fs_info; - return binderfs_parse_mount_opts(data, &info->mount_opts); -} - -static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root) -{ - struct binderfs_info *info; - - info = root->d_sb->s_fs_info; - if (info->mount_opts.max <= BINDERFS_MAX_MINOR) - seq_printf(seq, ",max=%d", info->mount_opts.max); - - return 0; -} - static const struct super_operations binderfs_super_ops = { - .evict_inode = binderfs_evict_inode, - .remount_fs = binderfs_remount, - .show_options = binderfs_show_mount_opts, - .statfs = simple_statfs, + .statfs = simple_statfs, + .evict_inode = binderfs_evict_inode, }; -static inline bool is_binderfs_control_device(const struct dentry *dentry) -{ - struct binderfs_info *info = dentry->d_sb->s_fs_info; - return info->control_dentry == dentry; -} - static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - if (is_binderfs_control_device(old_dentry) || - is_binderfs_control_device(new_dentry)) + struct inode *inode = d_inode(old_dentry); + + /* binderfs doesn't support directories. */ + if (d_is_dir(old_dentry)) return -EPERM; - return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags); + if (flags & ~RENAME_NOREPLACE) + return -EINVAL; + + if (!simple_empty(new_dentry)) + return -ENOTEMPTY; + + if (d_really_is_positive(new_dentry)) + simple_unlink(new_dir, new_dentry); + + old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = + new_dir->i_mtime = inode->i_ctime = current_time(old_dir); + + return 0; } static int binderfs_unlink(struct inode *dir, struct dentry *dentry) { - if (is_binderfs_control_device(dentry)) + /* + * The control dentry is only ever touched during mount so checking it + * here should not require us to take lock. + */ + if (BINDERFS_I(dir)->control_dentry == dentry) return -EPERM; return simple_unlink(dir, dentry); @@ -395,16 +318,13 @@ static int binderfs_binder_ctl_create(struct super_block *sb) struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; -#if defined(CONFIG_IPC_NS) - bool use_reserve = (info->ipc_ns == show_init_ipc_ns_compat()); -#else - bool use_reserve = true; -#endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return -ENOMEM; + inode_lock(d_inode(root)); + /* If we have already created a binder-control node, return. */ if (info->control_dentry) { ret = 0; @@ -418,10 +338,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - minor = ida_alloc_max_compat(&binderfs_minors, - use_reserve ? 
BINDERFS_MAX_MINOR :
-			     BINDERFS_MAX_MINOR_CAPPED,
-			     GFP_KERNEL);
+	//minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
+	minor = ida_simple_get(&binderfs_minors, 0, BINDERFS_MAX_MINOR, GFP_KERNEL);
 	mutex_unlock(&binderfs_minors_mutex);
 	if (minor < 0) {
 		ret = minor;
@@ -446,10 +364,12 @@
 	inode->i_private = device;
 	info->control_dentry = dentry;
 	d_add(dentry, inode);
+	inode_unlock(d_inode(root));

 	return 0;

 out:
+	inode_unlock(d_inode(root));
 	kfree(device);
 	iput(inode);
@@ -464,9 +384,12 @@ static const struct inode_operations binderfs_dir_inode_operations = {

 static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int ret;
 	struct binderfs_info *info;
+	int ret = -ENOMEM;
 	struct inode *inode = NULL;
+	struct ipc_namespace *ipc_ns = sb->s_fs_info;
+
+	get_ipc_ns(ipc_ns);

 	sb->s_blocksize = PAGE_SIZE;
 	sb->s_blocksize_bits = PAGE_SHIFT;
@@ -488,17 +411,11 @@
 	sb->s_op = &binderfs_super_ops;
 	sb->s_time_gran = 1;

-	sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
-	if (!sb->s_fs_info)
-		return -ENOMEM;
-	info = sb->s_fs_info;
-
-	info->ipc_ns = get_ipc_ns_exported_compat(current->nsproxy->ipc_ns);
-
-	ret = binderfs_parse_mount_opts(data, &info->mount_opts);
-	if (ret)
-		return ret;
+	info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+	if (!info)
+		goto err_without_dentry;
+	info->ipc_ns = ipc_ns;
 	info->root_gid = make_kgid(sb->s_user_ns, 0);
 	if (!gid_valid(info->root_gid))
 		info->root_gid = GLOBAL_ROOT_GID;
@@ -506,9 +423,11 @@
 	if (!uid_valid(info->root_uid))
 		info->root_uid = GLOBAL_ROOT_UID;

+	sb->s_fs_info = info;
+
 	inode = new_inode(sb);
 	if (!inode)
-		return -ENOMEM;
+		goto err_without_dentry;

 	inode->i_ino = FIRST_INODE;
 	inode->i_fop = &simple_dir_operations;
@@ -519,28 +438,94 @@
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root)
-		return -ENOMEM;
+		goto err_without_dentry;
+
+	ret = binderfs_binder_ctl_create(sb);
+	if (ret)
+		goto err_with_dentry;
+
+	return 0;
+
+err_with_dentry:
+	dput(sb->s_root);
+	sb->s_root = NULL;
-	return binderfs_binder_ctl_create(sb);
+err_without_dentry:
+	put_ipc_ns(ipc_ns);
+	iput(inode);
+	kfree(info);
+
+	return ret;
+}
+
+static int binderfs_test_super(struct super_block *sb, void *data)
+{
+	struct binderfs_info *info = sb->s_fs_info;
+
+	if (info)
+		return info->ipc_ns == data;
+
+	return 0;
+}
+
+static int binderfs_set_super(struct super_block *sb, void *data)
+{
+	sb->s_fs_info = data;
+	return set_anon_super(sb, NULL);
 }

 static struct dentry *binderfs_mount(struct file_system_type *fs_type,
 				     int flags, const char *dev_name,
 				     void *data)
 {
-	return mount_nodev(fs_type, flags, data, binderfs_fill_super);
+
+/*
+ * The sget_userns() function was removed in kernel 5.3 and later. That
+ * breaks the binderfs build, since the binderfs code used here is a
+ * backport from kernel 4.21; the latest binderfs code has changed these
+ * paths as well. 
So this is a hotfix that enables CIC on kernels 5.3.0 and higher.
+ * (TODO) Update the binderfs code.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
+	return mount_nodev(fs_type, flags, data, binderfs_fill_super);
+
+#else
+	struct super_block *sb;
+	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
+
+	if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+
+	sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
+			 flags, ipc_ns->user_ns, ipc_ns);
+	if (IS_ERR(sb))
+		return ERR_CAST(sb);
+
+	if (!sb->s_root) {
+		int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
+		if (ret) {
+			deactivate_locked_super(sb);
+			return ERR_PTR(ret);
+		}
+
+		sb->s_flags |= SB_ACTIVE;
+	}
+
+	return dget(sb->s_root);
+
+#endif
 }

 static void binderfs_kill_super(struct super_block *sb)
 {
 	struct binderfs_info *info = sb->s_fs_info;

-	kill_litter_super(sb);
-
 	if (info && info->ipc_ns)
 		put_ipc_ns(info->ipc_ns);

 	kfree(info);
+	kill_litter_super(sb);
 }

 static struct file_system_type binder_fs_type = {
@@ -550,7 +535,7 @@
 	.fs_flags = FS_USERNS_MOUNT,
 };

-int __init init_binderfs(void)
+static int __init init_binderfs(void)
 {
 	int ret;
@@ -566,5 +551,16 @@
 		return ret;
 	}

+	binderfs_mnt = kern_mount(&binder_fs_type);
+	if (IS_ERR(binderfs_mnt)) {
+		ret = PTR_ERR(binderfs_mnt);
+		binderfs_mnt = NULL;
+		unregister_filesystem(&binder_fs_type);
+		unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+	}
 	return ret;
 }
+
+module_init(init_binderfs);
+MODULE_LICENSE("GPL v2");
diff --git a/binder/deps.c b/binder/deps.c
index dfc14a6..b1a108c 100644
--- a/binder/deps.c
+++ b/binder/deps.c
@@ -1,4 +1,15 @@
-#include "deps.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define BINDERFS_SUPER_MAGIC 0x6c6f6f70

 static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long) = NULL;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
@@ -20,8 +31,6 @@ static int (*security_binder_transfer_binder_ptr)(struct task_struct *from, stru
 static int (*security_binder_transfer_file_ptr)(struct task_struct *from, struct task_struct *to, struct file *file) = NULL;
 static void (*mmput_async_ptr)(struct mm_struct *) = NULL;
 static void (*put_ipc_ns_ptr)(struct ipc_namespace *) = NULL;
-static int (*task_work_add_ptr)(struct task_struct *, struct callback_head *, bool) = NULL;
-static struct ipc_namespace *(*show_init_ipc_ns_ptr)(void) = NULL;

 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
@@ -136,115 +145,17 @@ void mmput_async(struct mm_struct *mm)
 	mmput_async_ptr(mm);
 }

-void put_ipc_ns(struct ipc_namespace *ns)
-{
-	if (!put_ipc_ns_ptr)
-		put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns");
-	if(put_ipc_ns_ptr)
-		put_ipc_ns_ptr(ns);
-}
-
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
-{
-	if(!task_work_add_ptr)
-		task_work_add_ptr = kallsyms_lookup_name("task_work_add");
-	if(task_work_add_ptr)
-		return task_work_add_ptr(task, twork, notify);
-	else
-		return -1;
-}
-
-struct ipc_namespace *show_init_ipc_ns_compat(void)
-{
-	if(!show_init_ipc_ns_ptr)
-		show_init_ipc_ns_ptr = kallsyms_lookup_name("show_init_ipc_ns");
-	if(show_init_ipc_ns_ptr)
-		return show_init_ipc_ns_ptr();
-	else
-		return NULL;
-}
-
-static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
-{
-	__clear_bit(fd, fdt->open_fds);
-	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
-}
-
-static void __put_unused_fd(struct files_struct *files, unsigned int fd)
-{
-	struct fdtable *fdt = files_fdtable(files);
-	__clear_open_fd(fd, fdt);
-	if (fd < files->next_fd)
-		files->next_fd = fd;
-}
-
-static int close_fd_get_file_backport(unsigned int fd, struct file **res)
+bool is_binderfs_device(const struct inode *inode)
 {
-	struct files_struct *files = current->files;
-	struct file *file;
-	struct fdtable *fdt;
+	if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC)
+		return true;
 
-	spin_lock(&files->file_lock);
-	fdt = files_fdtable(files);
-	if (fd >= fdt->max_fds)
-		goto out_unlock;
-	file = fdt->fd[fd];
-	if (!file)
-		goto out_unlock;
-	rcu_assign_pointer(fdt->fd[fd], NULL);
-	__put_unused_fd(files, fd);
-	spin_unlock(&files->file_lock);
-	get_file(file);
-	*res = file;
-	return filp_close(file, files);
-
-out_unlock:
-	spin_unlock(&files->file_lock);
-	*res = NULL;
-	return -ENOENT;
-}
-
-int __close_fd_get_file_compat(unsigned int fd, struct file **res)
-{
-	int (*close_fd_get_file_ptr)(unsigned int fd, struct file **res) = NULL;
-
-	close_fd_get_file_ptr = kallsyms_lookup_name("__close_fd_get_file");
-	if(close_fd_get_file_ptr)
-		return close_fd_get_file_ptr(fd, res);
-	else
-		return close_fd_get_file_backport(fd, res);
-}
-
-struct ipc_namespace *get_ipc_ns_exported_compat(struct ipc_namespace *ns)
-{
-	struct ipc_namespace *(*get_ipc_ns_exported_ptr)(struct ipc_namespace *ns) = NULL;
-
-	get_ipc_ns_exported_ptr = kallsyms_lookup_name("get_ipc_ns_exported");
-	if(get_ipc_ns_exported_ptr)
-		return get_ipc_ns_exported_ptr(ns);
-	else
-		return get_ipc_ns(ns);
+	return false;
 }
 
-int ida_alloc_max_compat(struct ida *ida, unsigned int max, gfp_t gfp)
-{
-	int (*ida_alloc_max_ptr)(struct ida *, unsigned int, gfp_t) = NULL;
-
-	ida_alloc_max_ptr = kallsyms_lookup_name("ida_alloc_max");
-	if(ida_alloc_max_ptr)
-		return ida_alloc_max_ptr(ida, max, gfp);
-	else
-		return ida_simple_get(ida, 0, max, gfp);
-
-}
-
-void ida_free_compat(struct ida *ida, unsigned int id)
+void put_ipc_ns(struct ipc_namespace *ns)
 {
-	void (*ida_free_ptr)(struct ida *, unsigned int) = NULL;
-	ida_free_ptr = kallsyms_lookup_name("ida_free");
-
-	if(ida_free_ptr)
-		return ida_free_ptr(ida, id);
-	else
-		return ida_simple_remove(ida, id);
+	if (!put_ipc_ns_ptr)
+		put_ipc_ns_ptr = kallsyms_lookup_name("put_ipc_ns");
+	/* guard against a failed lookup before calling through the pointer */
+	if (put_ipc_ns_ptr)
+		put_ipc_ns_ptr(ns);
 }
diff --git a/binder/deps.h b/binder/deps.h
deleted file mode 100644
index 2665d3e..0000000
--- a/binder/deps.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _DEPS_H
-#define _DEPS_H
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-	void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size);
-#else
-	void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details);
-#endif
-int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages);
-struct files_struct *get_files_struct(struct task_struct *task);
-void put_files_struct(struct files_struct *files);
-struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags);
-int __alloc_fd(struct files_struct *files, unsigned start, unsigned end, unsigned flags);
-void __fd_install(struct files_struct *files, unsigned int fd, struct file *file);
-int __close_fd(struct files_struct *files, unsigned int fd);
-int can_nice(const struct task_struct *p, const int nice);
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from, struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file);
-void mmput_async(struct mm_struct *mm);
-void put_ipc_ns(struct ipc_namespace *ns);
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool notify);
-int __close_fd_get_file_compat(unsigned int fd, struct file **res);
-struct ipc_namespace *show_init_ipc_ns_compat(void);
-struct ipc_namespace *get_ipc_ns_exported_compat(struct ipc_namespace *ns);
-int ida_alloc_max_compat(struct ida *ida, unsigned int max, gfp_t gfp);
-void ida_free_compat(struct ida *, unsigned int id);
-
-#endif
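
The deps.c wrappers above all rely on the same compatibility trick: resolve a
symbol the kernel does not export to modules with kallsyms_lookup_name(),
cache it in a static function pointer, and call through that pointer. A
minimal sketch of the pattern, assuming it is built as part of this module and
that kallsyms_lookup_name() is itself still exported (it no longer is on
kernels 5.7 and newer):

#include <linux/kallsyms.h>
#include <linux/vmalloc.h>

/* Cached pointer, resolved lazily on first use. */
static struct vm_struct *(*get_vm_area_ptr)(unsigned long, unsigned long);

struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	if (!get_vm_area_ptr)
		get_vm_area_ptr = (struct vm_struct *(*)(unsigned long, unsigned long))
			kallsyms_lookup_name("get_vm_area");

	/* A failed lookup leaves the pointer NULL, so guard the call. */
	return get_vm_area_ptr ? get_vm_area_ptr(size, flags) : NULL;
}

The explicit cast avoids the implicit unsigned-long-to-pointer conversion that
the deps.c wrappers themselves lean on.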
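
For reference, device creation on a mounted binderfs goes through the
binder-control node that binderfs_binder_ctl_create() sets up: userspace opens
it and issues the BINDER_CTL_ADD ioctl to allocate a new binder device. A
hypothetical userspace sketch; the /dev/binderfs mount point and the my-binder
device name are assumptions, and the UAPI header name varied in early versions
of binderfs, so it may differ for this backport:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device device;
	int fd, ret;

	memset(&device, 0, sizeof(device));
	strncpy(device.name, "my-binder", sizeof(device.name) - 1);

	/* Assumes the filesystem was mounted first:
	 * mount -t binder binder /dev/binderfs */
	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		perror("open binder-control");
		return 1;
	}

	/* On success the kernel fills in the allocated major:minor pair. */
	ret = ioctl(fd, BINDER_CTL_ADD, &device);
	if (ret < 0)
		perror("BINDER_CTL_ADD");
	else
		printf("created /dev/binderfs/%s (%u:%u)\n",
		       device.name, device.major, device.minor);

	close(fd);
	return ret < 0;
}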