Handler Message Mechanism - Native Layer
Posted by xhBruce
Code base: android12-release
1. How MessageQueue connects to the Native layer
1.1 nativeInit initialization
When the Java-layer MessageQueue is constructed, it calls nativeInit, which creates a NativeMessageQueue. NativeMessageQueue inherits from MessageQueue and LooperCallback, and its constructor sets up the native Looper:
- Looper::getForThread() fetches the Looper stored in thread-local storage (TLS); if there is none, a new native Looper is created and Looper::setForThread(mLooper) saves it back into TLS.
- eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC) creates the fd used for wake-up events.
- rebuildEpollLocked() rebuilds the epoll instance and registers the wake fd on it.
- This native Looper has no relationship whatsoever with the Java-layer Looper.
A condensed excerpt of these constructors is shown after the file list below.
frameworks/base/core/jni/android_os_MessageQueue.cpp
system/core/libutils/include/utils/Looper.h
system/core/libutils/Looper.cpp
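For reference, here is a condensed sketch of the two constructors described above, abridged from android_os_MessageQueue.cpp and Looper.cpp (non-essential member initializers are omitted):

```cpp
// frameworks/base/core/jni/android_os_MessageQueue.cpp (abridged)
NativeMessageQueue::NativeMessageQueue() :
        mPollEnv(NULL), mPollObj(NULL), mExceptionObj(NULL) {
    mLooper = Looper::getForThread();   // look up the native Looper cached in TLS
    if (mLooper == NULL) {
        mLooper = new Looper(false);    // allowNonCallbacks = false
        Looper::setForThread(mLooper);  // cache it in TLS for this thread
    }
}

// system/core/libutils/Looper.cpp (abridged)
Looper::Looper(bool allowNonCallbacks)
    : mAllowNonCallbacks(allowNonCallbacks) /* ... other members ... */ {
    // eventfd whose only job is to wake epoll_wait() up
    mWakeEventFd.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
    LOG_ALWAYS_FATAL_IF(mWakeEventFd.get() < 0, "Could not make wake event fd: %s",
                        strerror(errno));

    AutoMutex _l(mLock);
    rebuildEpollLocked();               // create the epoll instance and add mWakeEventFd
}
```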
1.2 The nativePollOnce blocking operation
- pollOnce() first handles entries in the Response array that carry no callback (ident >= 0 means there is no callback, because POLL_CALLBACK = -2), and then calls pollInner().
- pollInner(): look at the Done: stage, where native Messages are dispatched first via handler->handleMessage(message), and the registered Requests are then handled via response.request.callback->handleEvent(fd, events, data).
The JNI entry point that leads into this call is sketched below, followed by the Looper.cpp source.
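For context, this is roughly how the Java-layer MessageQueue.next() reaches Looper::pollOnce(); the snippet below is abridged from android_os_MessageQueue.cpp:

```cpp
// frameworks/base/core/jni/android_os_MessageQueue.cpp (abridged)
static void android_os_MessageQueue_nativePollOnce(JNIEnv* env, jobject obj,
        jlong ptr, jint timeoutMillis) {
    // ptr is the NativeMessageQueue created by nativeInit()
    NativeMessageQueue* nativeMessageQueue = reinterpret_cast<NativeMessageQueue*>(ptr);
    nativeMessageQueue->pollOnce(env, obj, timeoutMillis);
}

void NativeMessageQueue::pollOnce(JNIEnv* env, jobject pollObj, int timeoutMillis) {
    mPollEnv = env;
    mPollObj = pollObj;
    mLooper->pollOnce(timeoutMillis);   // may block in epoll_wait() until a wake or timeout
    mPollObj = NULL;
    mPollEnv = NULL;

    // An exception raised by a Java callback during the poll is rethrown here.
    if (mExceptionObj) {
        env->Throw(mExceptionObj);
        env->DeleteLocalRef(mExceptionObj);
        mExceptionObj = NULL;
    }
}
```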
system/core/libutils/Looper.cpp
int Looper::pollOnce(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
    int result = 0;
    for (;;) {
        while (mResponseIndex < mResponses.size()) {
            const Response& response = mResponses.itemAt(mResponseIndex++);
            int ident = response.request.ident;
            if (ident >= 0) {
                int fd = response.request.fd;
                int events = response.events;
                void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE
                ALOGD("%p ~ pollOnce - returning signalled identifier %d: "
                        "fd=%d, events=0x%x, data=%p",
                        this, ident, fd, events, data);
#endif
                if (outFd != nullptr) *outFd = fd;
                if (outEvents != nullptr) *outEvents = events;
                if (outData != nullptr) *outData = data;
                return ident;
            }
        }

        if (result != 0) {
#if DEBUG_POLL_AND_WAKE
            ALOGD("%p ~ pollOnce - returning result %d", this, result);
#endif
            if (outFd != nullptr) *outFd = 0;
            if (outEvents != nullptr) *outEvents = 0;
            if (outData != nullptr) *outData = nullptr;
            return result;
        }

        result = pollInner(timeoutMillis);
    }
}
int Looper::pollInner(int timeoutMillis) {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
#endif

    // Adjust the timeout based on when the next message is due.
    if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
        if (messageTimeoutMillis >= 0
                && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
            timeoutMillis = messageTimeoutMillis;
        }
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - next message in %" PRId64 "ns, adjusted timeout: timeoutMillis=%d",
                this, mNextMessageUptime - now, timeoutMillis);
#endif
    }

    // Poll.
    int result = POLL_WAKE;
    mResponses.clear();
    mResponseIndex = 0;

    // We are about to idle.
    mPolling = true;

    struct epoll_event eventItems[EPOLL_MAX_EVENTS];
    int eventCount = epoll_wait(mEpollFd.get(), eventItems, EPOLL_MAX_EVENTS, timeoutMillis);

    // No longer idling.
    mPolling = false;

    // Acquire lock.
    mLock.lock();

    // Rebuild epoll set if needed.
    if (mEpollRebuildRequired) {
        mEpollRebuildRequired = false;
        rebuildEpollLocked();
        goto Done;
    }

    // Check for poll error.
    if (eventCount < 0) {
        if (errno == EINTR) {
            goto Done;
        }
        ALOGW("Poll failed with an unexpected error: %s", strerror(errno));
        result = POLL_ERROR;
        goto Done;
    }

    // Check for poll timeout.
    if (eventCount == 0) {
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - timeout", this);
#endif
        result = POLL_TIMEOUT;
        goto Done;
    }

    // Handle all events.
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
#endif

    for (int i = 0; i < eventCount; i++) {
        int fd = eventItems[i].data.fd;
        uint32_t epollEvents = eventItems[i].events;
        if (fd == mWakeEventFd.get()) {
            if (epollEvents & EPOLLIN) {
                awoken();
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on wake event fd.", epollEvents);
            }
        } else {
            ssize_t requestIndex = mRequests.indexOfKey(fd);
            if (requestIndex >= 0) {
                int events = 0;
                if (epollEvents & EPOLLIN) events |= EVENT_INPUT;
                if (epollEvents & EPOLLOUT) events |= EVENT_OUTPUT;
                if (epollEvents & EPOLLERR) events |= EVENT_ERROR;
                if (epollEvents & EPOLLHUP) events |= EVENT_HANGUP;
                pushResponse(events, mRequests.valueAt(requestIndex));
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                        "no longer registered.", epollEvents, fd);
            }
        }
    }
Done: ;

    // Invoke pending message callbacks.
    mNextMessageUptime = LLONG_MAX;
    while (mMessageEnvelopes.size() != 0) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
        if (messageEnvelope.uptime <= now) {
            // Remove the envelope from the list.
            // We keep a strong reference to the handler until the call to handleMessage
            // finishes. Then we drop it so that the handler can be deleted *before*
            // we reacquire our lock.
            { // obtain handler
                sp<MessageHandler> handler = messageEnvelope.handler;
                Message message = messageEnvelope.message;
                mMessageEnvelopes.removeAt(0);
                mSendingMessage = true;
                mLock.unlock();

#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                ALOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
                        this, handler.get(), message.what);
#endif
                handler->handleMessage(message);
            } // release handler

            mLock.lock();
            mSendingMessage = false;
            result = POLL_CALLBACK;
        } else {
            // The last message left at the head of the queue determines the next wakeup time.
            mNextMessageUptime = messageEnvelope.uptime;
            break;
        }
    }

    // Release lock.
    mLock.unlock();

    // Invoke all response callbacks.
    for (size_t i = 0; i < mResponses.size(); i++) {
        Response& response = mResponses.editItemAt(i);
        if (response.request.ident == POLL_CALLBACK) {
            int fd = response.request.fd;
            int events = response.events;
            void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
            ALOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
                    this, response.request.callback.get(), fd, events, data);
#endif
            // Invoke the callback. Note that the file descriptor may be closed by
            // the callback (and potentially even reused) before the function returns so
            // we need to be a little careful when removing the file descriptor afterwards.
            int callbackResult = response.request.callback->handleEvent(fd, events, data);
            if (callbackResult == 0) {
                removeFd(fd, response.request.seq);
            }

            // Clear the callback reference in the response structure promptly because we
            // will not clear the response vector itself until the next poll.
            response.request.callback.clear();
            result = POLL_CALLBACK;
        }
    }
    return result;
}
2. The Handler message mechanism at the Native layer
2.1 Analogy with the FWK-layer Handler mechanism objects
The native layer has its own Handler-like mechanism. Looper.h defines the Message struct, the MessageHandler message-handling class, the LooperCallback callback class, and the Looper class. The native side likewise provides Looper::prepare(), Looper::sendMessage(), Looper::wake(), Looper::pollOnce(), and so on; they are not described again here. Compare them against "Handler Message Mechanism - FWK Layer", and note the nativePollOnce blocking behavior above. A minimal usage sketch follows the header excerpt below, and after that come examples of LooperCallback usage in the system.
system/core/libutils/include/utils/Looper.h
/**
 * A message that can be posted to a Looper.
 */
struct Message {
    Message() : what(0) { }
    Message(int w) : what(w) { }

    /* The message type. (interpretation is left up to the handler) */
    int what;
};

/**
 * Interface for a Looper message handler.
 *
 * The Looper holds a strong reference to the message handler whenever it has
 * a message to deliver to it. Make sure to call Looper::removeMessages
 * to remove any pending messages destined for the handler so that the handler
 * can be destroyed.
 */
class MessageHandler : public virtual RefBase {
protected:
    virtual ~MessageHandler();

public:
    /**
     * Handles a message.
     */
    virtual void handleMessage(const Message& message) = 0;
};

/**
 * A simple proxy that holds a weak reference to a message handler.
 */
class WeakMessageHandler : public MessageHandler {
protected:
    virtual ~WeakMessageHandler();

public:
    WeakMessageHandler(const wp<MessageHandler>& handler);
    virtual void handleMessage(const Message& message);

private:
    wp<MessageHandler> mHandler;
};

/**
 * A looper callback.
 */
class LooperCallback : public virtual RefBase {
protected:
    virtual ~LooperCallback();

public:
    /**
     * Handles a poll event for the given file descriptor.
     * It is given the file descriptor it is associated with,
     * a bitmask of the poll events that were triggered (typically EVENT_INPUT),
     * and the data pointer that was originally supplied.
     *
     * Implementations should return 1 to continue receiving callbacks, or 0
     * to have this file descriptor and callback unregistered from the looper.
     */
    virtual int handleEvent(int fd, int events, void* data) = 0;
};
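Below is a minimal sketch of driving these native classes directly. The handler class EchoHandler and the surrounding function are illustrative assumptions, not AOSP code; only the Looper, Message, and MessageHandler APIs come from Looper.h:

```cpp
#include <log/log.h>
#include <utils/Looper.h>
#include <utils/StrongPointer.h>

using namespace android;

// Hypothetical handler: just logs the message code it receives.
class EchoHandler : public MessageHandler {
public:
    void handleMessage(const Message& message) override {
        ALOGD("EchoHandler got message what=%d", message.what);
    }
};

void runNativeLoopOnce() {
    // Create (or fetch) the Looper for the calling thread, like NativeMessageQueue does.
    sp<Looper> looper = Looper::prepare(0 /* opts */);

    sp<EchoHandler> handler = new EchoHandler();
    looper->sendMessage(handler, Message(1));                      // deliver as soon as possible
    looper->sendMessageDelayed(ms2ns(100), handler, Message(2));   // deliver in ~100 ms

    // Each pollOnce() call blocks in epoll_wait() until a message is due, an fd event
    // fires, or wake() is called; due messages are dispatched from pollInner().
    looper->pollOnce(-1);   // dispatches Message(1)
    looper->pollOnce(-1);   // sleeps ~100 ms, then dispatches Message(2)
}
```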
2.2 LooperCallback usage examples
2.2.1 Receiver in SensorService
How an app registers a SensorEventListener (Android 12)
How SensorService delivers data to the app (Android 12)
- When a sensor listener is registered, the JNI/native layer creates a Receiver that inherits from LooperCallback; the fd involved here is mReceiveFd (a simplified registration sketch follows the BitTube excerpt below).
- mSendFd, on the other hand, is monitored after the SensorEventConnection is created during registration: updateLooperRegistration updates the Looper, and the Looper obtained there is SensorService's mLooper.
frameworks/native/libs/sensor/BitTube.cpp
int BitTube::getFd() const {
    return mReceiveFd;
}

int BitTube::getSendFd() const {
    return mSendFd;
}
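The sketch below shows how such a Receiver-style callback gets attached to a Looper. It is a simplified approximation (the class and function names are illustrative, not the actual SensorManager JNI code); the Looper::addFd() signature is the one declared in Looper.h:

```cpp
#include <utils/Looper.h>

using namespace android;

// Hypothetical callback watching the receive end of a BitTube-like fd.
class ReceiverCallback : public LooperCallback {
public:
    int handleEvent(int fd, int events, void* data) override {
        if (events & Looper::EVENT_INPUT) {
            // read and dispatch the sensor events queued on fd ...
        }
        return 1;   // keep receiving callbacks; returning 0 would unregister this fd
    }
};

void watchReceiveFd(const sp<Looper>& looper, int receiveFd) {
    // ident = 0 because a callback is supplied; data can carry a context pointer.
    looper->addFd(receiveFd, 0, Looper::EVENT_INPUT, new ReceiverCallback(), nullptr);
}
```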
2.2.2 InputChannel in InputManagerService
- On the server side, the serverChannel is registered with a LooperEventCallback.
- The clientChannel is handed up to the framework; after InputEventReceiver initializes a NativeInputEventReceiver through JNI, it calls receiver->initialize() --> NativeInputEventReceiver::setFdEvents() (a sketch of setFdEvents follows the code below).
frameworks/native/libs/input/InputTransport.cpp
status_t InputChannel::openInputChannelPair(const std::string& name,
                                            std::unique_ptr<InputChannel>& outServerChannel,
                                            std::unique_ptr<InputChannel>& outClientChannel) {
    int sockets[2];
    if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sockets)) {
        status_t result = -errno;
        ALOGE("channel '%s' ~ Could not create socket pair. errno=%s(%d)", name.c_str(),
              strerror(errno), errno);
        outServerChannel.reset();
        outClientChannel.reset();
        return result;
    }

    int bufferSize = SOCKET_BUFFER_SIZE;
    setsockopt(sockets[0], SOL_SOCKET, SO_SNDBUF, &bufferSize, sizeof(bufferSize));
    setsockopt(sockets[0], SOL_SOCKET, SO_RCVBUF, &bufferSize, sizeof(bufferSize));
    setsockopt(sockets[1], SOL_SOCKET, SO_SNDBUF, &bufferSize, sizeof(bufferSize));
    setsockopt(sockets[1], SOL_SOCKET, SO_RCVBUF, &bufferSize, sizeof(bufferSize));

    sp<IBinder> token = new BBinder();

    std::string serverChannelName = name + " (server)";
    android::base::unique_fd serverFd(sockets[0]);
    outServerChannel = InputChannel::create(serverChannelName, std::move(serverFd), token);

    std::string clientChannelName = name + " (client)";
    android::base::unique_fd clientFd(sockets[1]);
    outClientChannel = InputChannel::create(clientChannelName, std::move(clientFd), token);
    return OK;
}
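As a sketch of the client-side registration mentioned above, NativeInputEventReceiver::setFdEvents() essentially adds or removes the client socket fd on the thread's Looper. The snippet below is a hedged reconstruction based on android_view_InputEventReceiver.cpp; exact getter signatures may differ slightly in android12-release:

```cpp
// frameworks/base/core/jni/android_view_InputEventReceiver.cpp (reconstruction)
void NativeInputEventReceiver::setFdEvents(int events) {
    if (mFdEvents != events) {
        mFdEvents = events;
        int fd = mInputConsumer.getChannel()->getFd();
        if (events) {
            // The receiver itself is a LooperCallback, so "this" is passed as the callback;
            // poll events on fd are then delivered to NativeInputEventReceiver::handleEvent().
            mMessageQueue->getLooper()->addFd(fd, 0, events, this, nullptr);
        } else {
            mMessageQueue->getLooper()->removeFd(fd);
        }
    }
}
```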
3. FWK-layer and Native-layer interoperation (to be continued)