Android Inter-Process Communication (IPC), Part 20 — Using System Services
1 The ServiceManager.getService() method
// frameworks/base/core/java/android/os/ServiceManager.java, line 49
public static IBinder getService(String name) {
    try {
        // Check the cache first
        IBinder service = sCache.get(name);
        if (service != null) {
            return service;
        } else {
            return getIServiceManager().getService(name);
        }
    } catch (RemoteException e) {
        Log.e(TAG, "error in getService", e);
    }
    return null;
}
- First, look in the cache; if the service is there, return it directly. sCache is a HashMap-based cache.
- Otherwise, call getIServiceManager().getService(name) to fetch the service and return it.
In Android IPC Part 17 — Binder at the Framework layer (Java): registering services, we saw that
getIServiceManager()
is equivalent to
new ServiceManagerProxy(new BinderProxy())
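Before diving into the internals, here is roughly how this API is used in practice. The sketch below assumes a hypothetical AIDL interface IMyService registered under the made-up name "demo.my_service"; note that ServiceManager is a hidden (non-SDK) API, so ordinary apps normally obtain services through Context.getSystemService() instead:

// Hypothetical caller (system/framework code); "demo.my_service" and IMyService are assumed names.
IBinder binder = ServiceManager.getService("demo.my_service");
if (binder != null) {
    // asInterface() wraps the returned BinderProxy in the AIDL-generated Proxy class
    IMyService service = IMyService.Stub.asInterface(binder);
    service.doSomething(); // hypothetical remote method; may throw RemoteException
}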
2 ServiceManagerProxy.getService(name)
// frameworks/base/core/java/android/os/ServiceManagerNative.java, line 118
public IBinder getService(String name) throws RemoteException {
    Parcel data = Parcel.obtain();
    Parcel reply = Parcel.obtain();
    data.writeInterfaceToken(IServiceManager.descriptor);
    data.writeString(name);
    // mRemote is a BinderProxy
    mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
    // Read the returned IBinder object out of the reply Parcel
    IBinder binder = reply.readStrongBinder();
    reply.recycle();
    data.recycle();
    return binder;
}
There are two key calls here: mRemote.transact() and reply.readStrongBinder(). Let's work through them one at a time.
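ServiceManagerProxy.getService() follows exactly the pattern that AIDL generates for every proxy method. As a hedged sketch (the interface descriptor, transaction code and method name are all made up), a proxy method returning an IBinder looks roughly like this:

// Sketch of an AIDL-style proxy method for a hypothetical IMyService;
// TRANSACTION_getWorkerBinder and "demo.IMyService" are assumed names.
public IBinder getWorkerBinder() throws RemoteException {
    Parcel data = Parcel.obtain();
    Parcel reply = Parcel.obtain();
    try {
        data.writeInterfaceToken("demo.IMyService");
        mRemote.transact(TRANSACTION_getWorkerBinder, data, reply, 0);
        reply.readException();           // rethrows any exception from the remote side
        return reply.readStrongBinder(); // the same call analyzed in section 4
    } finally {
        reply.recycle();
        data.recycle();
    }
}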
3 The mRemote.transact() method
mRemote is actually a BinderProxy, so let's look at BinderProxy's transact() method.
// frameworks/base/core/java/android/os/Binder.java, line 501
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
    Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
    if (Binder.isTracingEnabled()) {
        Binder.getTransactionTracker().addTrace();
    }
    return transactNative(code, data, reply, flags);
}

// frameworks/base/core/java/android/os/Binder.java, line 507
public native boolean transactNative(int code, Parcel data, Parcel reply,
        int flags) throws RemoteException;
We already covered Binder.checkParcel() earlier, so we won't repeat it here. transact() simply delegates to the native transactNative() method, which takes us into JNI.
3.1 The android_os_BinderProxy_transact() function
// frameworks/base/core/jni/android_util_Binder.cpp, line 1083
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    if (dataObj == NULL) {
        jniThrowNullPointerException(env, NULL);
        return JNI_FALSE;
    }
    // Convert the Java Parcel into a native Parcel
    Parcel* data = parcelForJavaObject(env, dataObj);
    if (data == NULL) {
        return JNI_FALSE;
    }
    Parcel* reply = parcelForJavaObject(env, replyObj);
    if (reply == NULL && replyObj != NULL) {
        return JNI_FALSE;
    }
    // gBinderProxyOffsets.mObject holds the native BpBinder, here the new BpBinder(0) object
    IBinder* target = (IBinder*)
        env->GetLongField(obj, gBinderProxyOffsets.mObject);
    if (target == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException", "Binder has been finalized!");
        return JNI_FALSE;
    }
    ALOGV("Java code calling transact on %p in Java object %p with code %" PRId32 "\n",
            target, obj, code);
    bool time_binder_calls;
    int64_t start_millis;
    if (kEnableBinderSample) {
        // Only log the binder call duration for things on the Java-level main thread.
        time_binder_calls = should_time_binder_calls();
        if (time_binder_calls) {
            start_millis = uptimeMillis();
        }
    }
    //printf("Transact from Java code to %p sending: ", target); data->print();
    // target is the BpBinder(0) object saved in gBinderProxyOffsets.mObject
    status_t err = target->transact(code, *data, reply, flags);
    //if (reply) printf("Transact from Java code to %p received: ", target); reply->print();
    if (kEnableBinderSample) {
        if (time_binder_calls) {
            conditionally_log_binder_call(start_millis, target, code);
        }
    }
    if (err == NO_ERROR) {
        return JNI_TRUE;
    } else if (err == UNKNOWN_TRANSACTION) {
        return JNI_FALSE;
    }
    signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/, data->dataSize());
    return JNI_FALSE;
}
The key line in the code above is:
status_t err = target->transact(code, *data, reply, flags);
3.2 The BpBinder::transact() function
// frameworks/native/libs/binder/BpBinder.cpp, line 159
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}
So it simply forwards the call to IPCThreadState::transact().
3.3 The IPCThreadState::transact() function
// frameworks/native/libs/binder/IPCThreadState.cpp, line 548
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck(); // Check the data for errors
    flags |= TF_ACCEPT_FDS;
    ...
    if (err == NO_ERROR) {
        // Write the transaction data
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    ...
    // By default the call is not oneway, i.e. we must wait for the server's reply
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            // Wait for the response
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }
    return err;
}
This boils down to two steps:
- First, writeTransactionData() writes the transaction data.
- Then, waitForResponse() waits for and parses the result.
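A note on the TF_ONE_WAY check above: at the Java level it corresponds to IBinder.FLAG_ONEWAY. A normal transact() blocks in waitForResponse() until BR_REPLY comes back, while a oneway call returns as soon as the command is written to the driver. A minimal sketch, assuming binder is an IBinder obtained earlier and that the hypothetical service accepts FIRST_CALL_TRANSACTION with the "demo.IMyService" descriptor:

// Minimal sketch of a oneway call; descriptor and transaction code are assumptions.
Parcel data = Parcel.obtain();
try {
    data.writeInterfaceToken("demo.IMyService");
    // With FLAG_ONEWAY, transact() does not wait for a reply, so reply may be null;
    // without the flag, the calling thread would block until the server answers.
    binder.transact(IBinder.FIRST_CALL_TRANSACTION, data, null, IBinder.FLAG_ONEWAY);
} catch (RemoteException e) {
    // the remote process may have died
} finally {
    data.recycle();
}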
Let's look at the key parts of the waitForResponse() implementation.
3.4 The IPCThreadState::waitForResponse() function
// frameworks/native/libs/binder/IPCThreadState.cpp, line 712
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;
    while (1) {
        if ((err = talkWithDriver()) < NO_ERROR) break;
        ...
        cmd = mIn.readInt32();
        switch (cmd) {
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // When the reply Parcel is recycled, freeBuffer() is called to release the buffer
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        ...
                    }
                }
            }
            // other cases omitted
        }
    }
    ...
    return err;
}
At this point the thread is waiting for the reply; when data arrives from the driver, the command is read with cmd = mIn.readInt32() and dispatched in the switch above.
3.5 The binder_send_reply() function
On the servicemanager side, the GET_SERVICE_TRANSACTION is handled and the result is sent back through binder_send_reply():
// frameworks/native/cmds/servicemanager/binder.c
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;
    // The free-buffer command
    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    // The reply command
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        ...
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    // Send both commands to the binder driver
    binder_write(bs, &data, sizeof(data));
}
binder_write() sends the BC_FREE_BUFFER and BC_REPLY command protocols to the driver.
Inside the driver, the path is binder_ioctl -> binder_ioctl_write_read -> binder_thread_write; since the command is BC_REPLY, it enters binder_transaction(), which inserts a transaction into the todo queue of the thread that requested the service. That requesting process, still executing talkWithDriver(), then reaches binder_thread_read() and processes the transaction from its todo queue.
4 The Parcel.readStrongBinder() method
Parcel.readStrongBinder() is essentially the mirror image of the writeStrongBinder() process.
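The symmetry is visible from the Java API alone. In the minimal in-process sketch below (no driver round-trip involved), writing a local Binder and reading it back should yield the very same object, because the BINDER_TYPE_BINDER branch of unflatten_binder() (section 4.3) is taken; only when the Parcel crosses a process boundary does the reader get a BinderProxy via the BINDER_TYPE_HANDLE branch.

// In-process sketch: write a local Binder, rewind, read it back.
Binder local = new Binder();
Parcel p = Parcel.obtain();
try {
    p.writeStrongBinder(local);           // flattened as BINDER_TYPE_BINDER (local object)
    p.setDataPosition(0);                 // rewind so readStrongBinder() reads what we wrote
    IBinder read = p.readStrongBinder();
    boolean sameObject = (read == local); // expected to be true within the same process
} finally {
    p.recycle();
}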
Now let's look at the readStrongBinder() source:
// frameworks/base/core/java/android/os/Parcel.java, line 1686
/**
 * Read an object from the parcel at the current dataPosition().
 */
public final IBinder readStrongBinder() {
    return nativeReadStrongBinder(mNativePtr);
}

private static native IBinder nativeReadStrongBinder(long nativePtr);
Internally it calls nativeReadStrongBinder(), which, as the declaration above shows, is a native method; via JNI we therefore end up in the android_os_Parcel_readStrongBinder() function.
4.1 The android_os_Parcel_readStrongBinder() function
// frameworks/base/core/jni/android_os_Parcel.cpp, line 429
static jobject android_os_Parcel_readStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr)
{
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        return javaObjectForIBinder(env, parcel->readStrongBinder());
    }
    return NULL;
}
javaObjectForIBinder() converts the native-layer BpBinder object into a Java-layer BinderProxy object.
Before that, the function above calls Parcel::readStrongBinder() on the native Parcel.
4.2 The Parcel::readStrongBinder() function
// frameworks/native/libs/binder/Parcel.cpp, line 1334
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}
This is straightforward: it mainly just calls unflatten_binder().
4.3 The unflatten_binder() function
// frameworks/native/libs/binder/Parcel.cpp, line 293
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
        case BINDER_TYPE_BINDER:
            *out = reinterpret_cast<IBinder*>(flat->cookie);
            return finish_unflatten_binder(NULL, *flat, in);
        case BINDER_TYPE_HANDLE:
            // We take this branch
            *out = proc->getStrongProxyForHandle(flat->handle);
            // out now holds the BpBinder object created by getStrongProxyForHandle()
            return finish_unflatten_binder(
                static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
PS: frameworks/native/libs/binder/Parcel.cpp contains two unflatten_binder() overloads; they differ only in the last parameter, one taking sp<IBinder>* out and the other wp<IBinder>* out. Don't confuse them.
Inside unflatten_binder() we hit the BINDER_TYPE_HANDLE branch and call getStrongProxyForHandle().
4.4 The getStrongProxyForHandle() function
// frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    // Look up the resource entry for this handle
    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            ...
            // If no IBinder exists yet for this handle, or its weak reference is stale,
            // create a new BpBinder object
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
This is where the BpBinder proxy object pointing at the Binder server is created. javaObjectForIBinder() then converts that native BpBinder into a Java-layer BinderProxy. In other words, getService() ultimately hands back a BinderProxy that points at the target Binder service.
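Since the returned object is a proxy for a Binder living in another process, the usual proxy-side facilities apply to it; for example, a death notification can be registered on it. A small usage sketch (the service name is hypothetical):

IBinder binder = ServiceManager.getService("demo.my_service"); // hypothetical name
if (binder != null) {
    try {
        binder.linkToDeath(new IBinder.DeathRecipient() {
            @Override
            public void binderDied() {
                // The process hosting the service died; re-fetch the service if needed.
            }
        }, 0 /* flags */);
    } catch (RemoteException e) {
        // the service died before we could register the recipient
    }
}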
5 Summary
So the core of getService() can be summarized as:
public static IBinder getService(String name) {
    ...
    // The Java-layer Parcel also has to be converted to a native-layer Parcel here
    Parcel reply = Parcel.obtain();
    // Talk to the binder driver
    BpBinder::transact(GET_SERVICE_TRANSACTION, *data, reply, 0);
    IBinder binder = javaObjectForIBinder(env, new BpBinder(handle));
    ...
}
javaObjectForIBinder() creates the BinderProxy object and stores the address of the BpBinder object in the BinderProxy's mObject field. Getting a service, then, is just a matter of sending a GET_SERVICE_TRANSACTION through that BpBinder, which exchanges data with the binder driver.
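Finally, here is what becomes of that BinderProxy on the caller's side: the AIDL-generated Stub.asInterface() first tries queryLocalInterface(), which only succeeds for a local Binder in the same process, and otherwise wraps the proxy. A hedged sketch for a hypothetical IMyService:

// Sketch of an AIDL-style asInterface() (hypothetical IMyService and descriptor).
public static IMyService asInterface(IBinder obj) {
    if (obj == null) {
        return null;
    }
    // Same process: the service's own Binder object implements the interface directly.
    IInterface iin = obj.queryLocalInterface("demo.IMyService");
    if (iin instanceof IMyService) {
        return (IMyService) iin;
    }
    // Different process: obj is the BinderProxy returned by getService(); wrap it in the
    // generated Proxy, whose methods call mRemote.transact() as analyzed above.
    return new IMyService.Stub.Proxy(obj);
}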