How does a Bitmap allocate its memory? How is that memory reused and released? How should local image resources be adapted to different screen densities?
If you profile your own app, you will usually find that the biggest memory consumers are local resource images. How do we optimize that memory?
If you ever work on live-streaming features, a live room is full of promotions and complex animations, and Bugly will inevitably report plenty of OOM crashes online. How can we dump the memory of a production process before it actually OOMs, so that we can analyze and optimize it?
We could hardly be more familiar with Bitmap, so let's start by thinking through the questions above. If you can find a solution to every one of them, you will save yourself a lot of trouble in future development.
1. The memory size of a Bitmap
Do you still remember this question from the article Android图片压缩加密上传 - JPEG压缩算法解析 (on image compression and the JPEG algorithm): an 864x582 PNG is placed in the drawable-xhdpi directory and loaded on a Redmi Note 3 (1920x1080 pixels, 5.5 inches). How much memory does it occupy? We all know that image size = width * height * bytes per pixel, which gives 864 * 582 * 4 = 2011392 bytes. Yet Bitmap.getByteCount() reports 3465504. Is the formula wrong? No — the width and height in the formula are the Bitmap's width and height, not those of the resource image:
Bitmap size = bitmap.getWidth() * bitmap.getHeight() * bytes per pixel = 1134 * 764 * 4 = 3465504.
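To make the density scaling concrete, here is a minimal sketch (plain Java, run inside an Activity; the resource id R.drawable.sample is hypothetical, and the device is assumed to report densityDpi = 420 as in the example above) that reproduces the arithmetic the framework performs:

int resWidth = 864, resHeight = 582;   // pixel size of the PNG on disk
int inDensity = 320;                   // drawable-xhdpi
int targetDensity = 420;               // densityDpi reported by the device

float scale = (float) targetDensity / inDensity;     // 1.3125
int bmpWidth = (int) (resWidth * scale + 0.5f);      // 1134
int bmpHeight = (int) (resHeight * scale + 0.5f);    // 764
int bytes = bmpWidth * bmpHeight * 4;                // ARGB_8888 -> 3465504

// Compare against the real thing:
Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.sample);
Log.d("TAG", bitmap.getByteCount() + " vs " + bytes);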
So where do this width and height come from? Tracing the source, we find that both are assigned in Bitmap's constructor:
/**
* Private constructor that must received an already allocated native bitmap
* int (pointer).
*/
// called from JNI
Bitmap(long nativeBitmap, int width, int height, int density,
        boolean isMutable, boolean requestPremultiplied,
        byte[] ninePatchChunk, NinePatch.InsetStruct ninePatchInsets) {
    if (nativeBitmap == 0) {
        throw new RuntimeException("internal error: native bitmap is 0");
    }

    mWidth = width;
    mHeight = height;
    mIsMutable = isMutable;
    mRequestPremultiplied = requestPremultiplied;
    mNinePatchChunk = ninePatchChunk;
    mNinePatchInsets = ninePatchInsets;
    if (density >= 0) {
        mDensity = density;
    }

    mNativePtr = nativeBitmap;
    long nativeSize = NATIVE_ALLOCATION_SIZE + getAllocationByteCount();
    NativeAllocationRegistry registry = new NativeAllocationRegistry(
        Bitmap.class.getClassLoader(), nativeGetNativeFinalizer(), nativeSize);
    registry.registerNativeAllocation(this, nativeBitmap);
    if (ResourcesImpl.TRACE_FOR_DETAILED_PRELOAD) {
        sPreloadTracingNumInstantiatedBitmaps++;
        sPreloadTracingTotalBitmapsSize += nativeSize;
    }
}

The comment "called from JNI" already says it all: this object is constructed and handed back by the native layer. So let's follow BitmapFactory.decodeResource() and see where that happens:
public static Bitmap decodeResource(Resources res, int id, Options opts) {
validate(opts);
Bitmap bm = null;
InputStream is = null;
    try {
        final TypedValue value = new TypedValue();
        is = res.openRawResource(id, value);
        bm = decodeResourceStream(res, value, is, null, opts);
    } catch (Exception e) {
        /*  do nothing.
            If the exception happened on open, bm will be null.
            If it happened on close, bm is still valid.
        */
    } finally {
        try {
            if (is != null) is.close();
        } catch (IOException e) {
            // Ignore
        }
    }

    if (bm == null && opts != null && opts.inBitmap != null) {
        throw new IllegalArgumentException("Problem decoding into existing bitmap");
    }
    return bm;
}

public static Bitmap decodeResourceStream(@Nullable Resources res, @Nullable TypedValue value,
        @Nullable InputStream is, @Nullable Rect pad, @Nullable Options opts) {
    validate(opts);
    if (opts == null) {
        opts = new Options();
    }

    if (opts.inDensity == 0 && value != null) {
        final int density = value.density;
        if (density == TypedValue.DENSITY_DEFAULT) {
            opts.inDensity = DisplayMetrics.DENSITY_DEFAULT;
        } else if (density != TypedValue.DENSITY_NONE) {
            opts.inDensity = density;
        }
    }

    // Obtain the dpi of the current device
    if (opts.inTargetDensity == 0 && res != null) {
        opts.inTargetDensity = res.getDisplayMetrics().densityDpi;
    }

    return decodeStream(is, pad, opts);
}

// ...... part of the call chain omitted ......

private static native Bitmap nativeDecodeStream(InputStream is, byte[] storage,
        Rect padding, Options opts);

The call eventually lands in the native method nativeDecodeStream. I recommend reading this part of the source on http://androidxref.com/ , because it differs between versions and we should not rely on a single one. You could of course download every version locally, but that takes well over a hundred gigabytes of disk space. Here we use Android N as the example:
/frameworks/base/core/jni/android/graphics/BitmapFactory.cpp
static jobject nativeDecodeStream(JNIEnv *env, jobject clazz, jobject is, jbyteArray storage,
jobject padding, jobject options) {
    jobject bitmap = NULL;
    std::unique_ptr<SkStream> stream(CreateJavaInputStreamAdaptor(env, is, storage));

    if (stream.get()) {
        std::unique_ptr<SkStreamRewindable> bufferedStream(
                SkFrontBufferedStream::Create(stream.release(), SkCodec::MinBufferedBytesNeeded()));
        SkASSERT(bufferedStream.get() != NULL);
        bitmap = doDecode(env, bufferedStream.release(), padding, options);
    }
    return bitmap;
}

static jobject doDecode(JNIEnv *env, SkStreamRewindable *stream, jobject padding, jobject options) {
    // This function takes ownership of the input stream. Since the SkAndroidCodec
// will take ownership of the stream, we don't necessarily need to take ownership
// here. This is a precaution - if we were to return before creating the codec,
// we need to make sure that we delete the stream.
    std::unique_ptr<SkStreamRewindable> streamDeleter(stream);

    // Set default values for the options parameters.
    int sampleSize = 1;
    // Whether we only want to read the image's dimensions
    bool onlyDecodeSize = false;
    SkColorType prefColorType = kN32_SkColorType;
    bool isMutable = false;
    float scale = 1.0f;
    bool requireUnpremultiplied = false;
    jobject javaBitmap = NULL;

    // Update with options supplied by the client.
    // Parse the options parameter
    if (options != NULL) {
sampleSize = env->GetIntField(options, gOptions_sampleSizeFieldID); // Correct a non-positive sampleSize. sampleSize defaults to zero within the
// options object, which is strange.
if (sampleSize <= 0) {
sampleSize = 1;
} if (env->GetBooleanField(options, gOptions_justBoundsFieldID)) {
onlyDecodeSize = true;
} // initialize these, in case we fail later on
env->SetIntField(options, gOptions_widthFieldID, -1);
env->SetIntField(options, gOptions_heightFieldID, -1);
        env->SetObjectField(options, gOptions_mimeFieldID, 0);

        // Read the ColorType, the reuse (inBitmap) parameter and so on
jobject jconfig = env->GetObjectField(options, gOptions_configFieldID);
prefColorType = GraphicsJNI::getNativeBitmapColorType(env, jconfig);
isMutable = env->GetBooleanField(options, gOptions_mutableFieldID);
requireUnpremultiplied = !env->GetBooleanField(options, gOptions_premultipliedFieldID);
        javaBitmap = env->GetObjectField(options, gOptions_bitmapFieldID);

        // Compute the scaling ratio
        if (env->GetBooleanField(options, gOptions_scaledFieldID)) {
            // The density of the resource (here xhdpi = 320)
            const int density = env->GetIntField(options, gOptions_densityFieldID);
            // The dpi of the current device
            const int targetDensity = env->GetIntField(options, gOptions_targetDensityFieldID);
            const int screenDensity = env->GetIntField(options, gOptions_screenDensityFieldID);
            if (density != 0 && targetDensity != 0 && density != screenDensity) {
                // scale = device dpi / resource density
                // scale = 420 / 320 = 1.3125
scale = (float) targetDensity / density;
}
}
    }

    // Create the codec.
    NinePatchPeeker peeker;
    std::unique_ptr<SkAndroidCodec> codec(SkAndroidCodec::NewFromStream(streamDeleter.release(), &peeker));
    if (!codec.get()) {
        return nullObjectReturn("SkAndroidCodec::NewFromStream returned null");
} // Do not allow ninepatch decodes to 565. In the past, decodes to 565
// would dither, and we do not want to pre-dither ninepatches, since we
// know that they will be stretched. We no longer dither 565 decodes,
// but we continue to prevent ninepatches from decoding to 565, in order
// to maintain the old behavior.
if (peeker.mPatch && kRGB_565_SkColorType == prefColorType) {
prefColorType = kN32_SkColorType;
    }

    // Get the dimensions of the current image
    // Determine the output size.
    SkISize size = codec->getSampledDimensions(sampleSize);
    int scaledWidth = size.width();
    int scaledHeight = size.height();
    bool willScale = false;

    // Handle sampleSize downsampling; we did not pass it here, so it defaults to 1 as set above
// Apply a fine scaling step if necessary.
if (needsFineScale(codec->getInfo().dimensions(), size, sampleSize)) {
willScale = true;
scaledWidth = codec->getInfo().width() / sampleSize;
scaledHeight = codec->getInfo().height() / sampleSize;
} // Set the options and return if the client only wants the size.
if (options != NULL) {
        jstring mimeType = encodedFormatToString(env, codec->getEncodedFormat());
        if (env->ExceptionCheck()) {
            return nullObjectReturn("OOM in encodedFormatToString()");
        }

        // Fill in outWidth and outHeight on the options object
        env->SetIntField(options, gOptions_widthFieldID, scaledWidth);
        env->SetIntField(options, gOptions_heightFieldID, scaledHeight);
        env->SetObjectField(options, gOptions_mimeFieldID, mimeType);

        // If the caller only wants the size, return right here (nullptr rather than NULL)
        if (onlyDecodeSize) {
            return nullptr;
        }
} // Scale is necessary due to density differences.
if (scale != 1.0f) {
        willScale = true;

        // Compute scaledWidth and scaledHeight
        // scaledWidth  = (int) (864 * 1.3125 + 0.5f) = (int) 1134.5  = 1134
        scaledWidth = static_cast<int>(scaledWidth * scale + 0.5f);
        // scaledHeight = (int) (582 * 1.3125 + 0.5f) = (int) 764.375 = 764
scaledHeight = static_cast<int>(scaledHeight * scale + 0.5f);
    }

    // Check whether there is a Bitmap to reuse (options.inBitmap)
    android::Bitmap *reuseBitmap = nullptr;
    unsigned int existingBufferSize = 0;
    if (javaBitmap != NULL) {
        reuseBitmap = GraphicsJNI::getBitmap(env, javaBitmap);
        if (reuseBitmap->peekAtPixelRef()->isImmutable()) {
            // An immutable bitmap cannot be reused as the target of an image decoder.
ALOGW("Unable to reuse an immutable bitmap as an image decoder target.");
javaBitmap = NULL;
reuseBitmap = nullptr;
} else {
existingBufferSize = GraphicsJNI::getBitmapAllocationByteCount(env, javaBitmap);
}
}
    JavaPixelAllocator javaAllocator(env);
    RecyclingPixelAllocator recyclingAllocator(reuseBitmap, existingBufferSize);
    ScaleCheckingAllocator scaleCheckingAllocator(scale, existingBufferSize);
SkBitmap::HeapAllocator heapAllocator;
    SkBitmap::Allocator *decodeAllocator;
    if (javaBitmap != nullptr && willScale) {
        // This will allocate pixels using a HeapAllocator, since there will be an extra
// scaling step that copies these pixels into Java memory. This allocator
// also checks that the recycled javaBitmap is large enough.
decodeAllocator = &scaleCheckingAllocator;
} else if (javaBitmap != nullptr) {
decodeAllocator = &recyclingAllocator;
} else if (willScale) { // This will allocate pixels using a HeapAllocator, since there will be an extra
// scaling step that copies these pixels into Java memory.
decodeAllocator = &heapAllocator;
} else {
decodeAllocator = &javaAllocator;
} // Set the decode colorType. This is necessary because we can't always support
// the requested colorType.
SkColorType decodeColorType = codec->computeOutputColorType(prefColorType); // Construct a color table for the decode if necessary
SkAutoTUnref <SkColorTable> colorTable(nullptr);
SkPMColor *colorPtr = nullptr; int *colorCount = nullptr; int maxColors = 256;
SkPMColor colors[256]; if (kIndex_8_SkColorType == decodeColorType) {
colorTable.reset(new SkColorTable(colors, maxColors)); // SkColorTable expects us to initialize all of the colors before creating an
// SkColorTable. However, we are using SkBitmap with an Allocator to allocate
// memory for the decode, so we need to create the SkColorTable before decoding.
// It is safe for SkAndroidCodec to modify the colors because this SkBitmap is
// not being used elsewhere.
colorPtr = const_cast<SkPMColor *>(colorTable->readColors());
colorCount = &maxColors;
} // Set the alpha type for the decode.
    SkAlphaType alphaType = codec->computeOutputAlphaType(requireUnpremultiplied);

    // Create the SkImageInfo: width, height, ColorType, alphaType
const SkImageInfo decodeInfo = SkImageInfo::Make(size.width(), size.height(), decodeColorType,
alphaType);
SkImageInfo bitmapInfo = decodeInfo; if (decodeColorType == kGray_8_SkColorType) { // The legacy implementation of BitmapFactory used kAlpha8 for
// grayscale images (before kGray8 existed). While the codec
// recognizes kGray8, we need to decode into a kAlpha8 bitmap
// in order to avoid a behavior change.
bitmapInfo = SkImageInfo::MakeA8(size.width(), size.height());
    }

    // Set bitmapInfo on the SkBitmap; tryAllocPixels allocates the memory (analyzed in detail later)
    SkBitmap decodingBitmap;
    if (!decodingBitmap.setInfo(bitmapInfo) ||
            !decodingBitmap.tryAllocPixels(decodeAllocator, colorTable)) {
        // SkAndroidCodec should recommend a valid SkImageInfo, so setInfo()
        // should only fail if the calculated value for rowBytes is too
// large.
// tryAllocPixels() can fail due to OOM on the Java heap, OOM on the
// native heap, or the recycled javaBitmap being too small to reuse.
return nullptr;
} // Use SkAndroidCodec to perform the decode.
SkAndroidCodec::AndroidOptions codecOptions;
codecOptions.fZeroInitialized = (decodeAllocator == &javaAllocator) ?
SkCodec::kYes_ZeroInitialized : SkCodec::kNo_ZeroInitialized;
codecOptions.fColorPtr = colorPtr;
codecOptions.fColorCount = colorCount;
    codecOptions.fSampleSize = sampleSize;

    // Decode and obtain the pixel values
    SkCodec::Result result = codec->getAndroidPixels(decodeInfo, decodingBitmap.getPixels(),
            decodingBitmap.rowBytes(), &codecOptions);
    switch (result) {
        case SkCodec::kSuccess:
        case SkCodec::kIncompleteInput:
            break;
        default:
            return nullObjectReturn("codec->getAndroidPixels() failed.");
    }
jbyteArray ninePatchChunk = NULL; if (peeker.mPatch != NULL) { if (willScale) {
scaleNinePatchChunk(peeker.mPatch, scale, scaledWidth, scaledHeight);
} size_t ninePatchArraySize = peeker.mPatch->serializedSize();
ninePatchChunk = env->NewByteArray(ninePatchArraySize); if (ninePatchChunk == NULL) { return nullObjectReturn("ninePatchChunk == null");
}
jbyte *array = (jbyte *) env->GetPrimitiveArrayCritical(ninePatchChunk, NULL); if (array == NULL) { return nullObjectReturn("primitive array == null");
} memcpy(array, peeker.mPatch, peeker.mPatchSize);
env->ReleasePrimitiveArrayCritical(ninePatchChunk, array, 0);
}
jobject ninePatchInsets = NULL; if (peeker.mHasInsets) {
ninePatchInsets = env->NewObject(gInsetStruct_class, gInsetStruct_constructorMethodID,
peeker.mOpticalInsets[0], peeker.mOpticalInsets[1], peeker.mOpticalInsets[2], peeker.mOpticalInsets[3],
peeker.mOutlineInsets[0], peeker.mOutlineInsets[1], peeker.mOutlineInsets[2], peeker.mOutlineInsets[3],
peeker.mOutlineRadius, peeker.mOutlineAlpha, scale); if (ninePatchInsets == NULL) { return nullObjectReturn("nine patch insets == null");
} if (javaBitmap != NULL) {
env->SetObjectField(javaBitmap, gBitmap_ninePatchInsetsFieldID, ninePatchInsets);
}
    }

    // Build the SkBitmap that will actually be returned
    SkBitmap outputBitmap;
    if (willScale) {
        // If scaling is needed, a new image has to be created; what was decoded above
        // still has the original resource dimensions
// This is weird so let me explain: we could use the scale parameter
// directly, but for historical reasons this is how the corresponding
// Dalvik code has always behaved. We simply recreate the behavior here.
// The result is slightly different from simply using scale because of
// the 0.5f rounding bias applied when computing the target image size
        const float sx = scaledWidth / float(decodingBitmap.width());
        const float sy = scaledHeight / float(decodingBitmap.height());

        // Set the allocator for the outputBitmap.
        SkBitmap::Allocator *outputAllocator;
        if (javaBitmap != nullptr) {
outputAllocator = &recyclingAllocator;
} else {
outputAllocator = &javaAllocator;
}
SkColorType scaledColorType = colorTypeForScaledOutput(decodingBitmap.colorType()); // FIXME: If the alphaType is kUnpremul and the image has alpha, the
// colors may not be correct, since Skia does not yet support drawing
// to/from unpremultiplied bitmaps.
        // Set the SkImageInfo; note that scaledWidth and scaledHeight are used here
        outputBitmap.setInfo(SkImageInfo::Make(scaledWidth, scaledHeight,
                scaledColorType, decodingBitmap.alphaType()));

        // Allocate the memory for this Bitmap
        if (!outputBitmap.tryAllocPixels(outputAllocator, NULL)) {
            // This should only fail on OOM. The recyclingAllocator should have
// enough memory since we check this before decoding using the
// scaleCheckingAllocator.
return nullObjectReturn("allocation failed for scaled bitmap");
}
        SkPaint paint;
        // kSrc_Mode instructs us to overwrite the uninitialized pixels in
// outputBitmap. Otherwise we would blend by default, which is not
// what we want.
paint.setXfermodeMode(SkXfermode::kSrc_Mode);
        paint.setFilterQuality(kLow_SkFilterQuality);

        // Draw decodingBitmap onto outputBitmap
SkCanvas canvas(outputBitmap);
canvas.scale(sx, sy);
canvas.drawBitmap(decodingBitmap, 0.0f, 0.0f, &paint);
} else {
outputBitmap.swap(decodingBitmap);
} if (padding) { if (peeker.mPatch != NULL) {
GraphicsJNI::set_jrect(env, padding,
peeker.mPatch->paddingLeft, peeker.mPatch->paddingTop,
peeker.mPatch->paddingRight, peeker.mPatch->paddingBottom);
} else {
GraphicsJNI::set_jrect(env, padding, -1, -1, -1, -1);
}
} // If we get here, the outputBitmap should have an installed pixelref.
if (outputBitmap.pixelRef() == NULL) { return nullObjectReturn("Got null SkPixelRef");
}
if (!isMutable && javaBitmap == NULL) { // promise we will never change our pixels (great for sharing and pictures)
outputBitmap.setImmutable();
    }

    // If a bitmap was supplied for reuse, return the original javaBitmap
    bool isPremultiplied = !requireUnpremultiplied;
    if (javaBitmap != nullptr) {
GraphicsJNI::reinitBitmap(env, javaBitmap, outputBitmap.info(), isPremultiplied);
outputBitmap.notifyPixelsChanged(); // If a java bitmap was passed in for reuse, pass it back
return javaBitmap;
    }

    int bitmapCreateFlags = 0x0;
    if (isMutable) bitmapCreateFlags |= GraphicsJNI::kBitmapCreateFlag_Mutable;
    if (isPremultiplied) bitmapCreateFlags |= GraphicsJNI::kBitmapCreateFlag_Premultiplied;

    // No bitmap to reuse, so create a brand-new Java Bitmap
    // now create the java bitmap
return GraphicsJNI::createBitmap(env, javaAllocator.getStorageObjAndReset(),
bitmapCreateFlags, ninePatchChunk, ninePatchInsets, -1);
}
jobject GraphicsJNI::createBitmap(JNIEnv *env, android::Bitmap *bitmap, int bitmapCreateFlags, jbyteArray ninePatchChunk,
        jobject ninePatchInsets, int density) {
    bool isMutable = bitmapCreateFlags & kBitmapCreateFlag_Mutable;
    bool isPremultiplied = bitmapCreateFlags & kBitmapCreateFlag_Premultiplied;
    // The caller needs to have already set the alpha type properly, so the
// native SkBitmap stays in sync with the Java Bitmap.
assert_premultiplied(bitmap->info(), isPremultiplied);
jobject obj = env->NewObject(gBitmap_class, gBitmap_constructorMethodID, reinterpret_cast<jlong>(bitmap), bitmap->javaByteArray(),
bitmap->width(), bitmap->height(), density, isMutable, isPremultiplied,
ninePatchChunk, ninePatchInsets);
hasException(env); // For the side effect of logging.
return obj;
}

The code above looks long, but it is actually quite simple and I'm sure everyone can follow it. Let me summarize the flow:
1. Parse the Options parameters passed down from the Java layer, such as sampleSize, isMutable, javaBitmap and so on, and compute scale.
2. Get the dimensions of the current image, decide from sampleSize whether downsampling is needed, and compute scaledWidth and scaledHeight.
3. Set the options width and height to scaledWidth and scaledHeight. If we only want the dimensions (options.inJustDecodeBounds = true), return right there — but note that what is returned is the resource image's width and height, not the final Bitmap's. (Most of us misunderstand this point.)
4. Create the native SkImageInfo and SkBitmap, call tryAllocPixels to allocate the pixel memory, then call getAndroidPixels to decode the pixel values. This decodingBitmap is not the Bitmap that will finally be returned; it is the bitmap of the original resource.
5. Build the outputBitmap that will be returned: if scaling is needed, a new block of memory is allocated; otherwise swap is called directly. Finally, check whether there is a javaBitmap to reuse — if there is, call reinitBitmap and return it; if not, call createBitmap to create a new Bitmap. (A small sketch of points 3 and 5 follows this list.)
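Here is a minimal sketch of points 3 and 5 from the Java side (the resource id R.drawable.sample is hypothetical; assume the same 864x582 xhdpi asset on a 420 dpi device, running inside an Activity):

BitmapFactory.Options options = new BitmapFactory.Options();
options.inJustDecodeBounds = true;
BitmapFactory.decodeResource(getResources(), R.drawable.sample, options);
// Point 3: these are the resource dimensions (864x582), not the 1134x764 of the final Bitmap.
Log.d("TAG", "outWidth=" + options.outWidth + ", outHeight=" + options.outHeight);

// Decode a mutable bitmap so that it can later be offered back for reuse.
options.inJustDecodeBounds = false;
options.inMutable = true;
Bitmap first = BitmapFactory.decodeResource(getResources(), R.drawable.sample, options);

// Point 5: hand the existing pixel buffer back via inBitmap. doDecode() only accepts it if it
// is mutable and large enough; an immutable candidate is ignored (a fresh bitmap is allocated),
// and one that is too small makes the decode fail.
options.inBitmap = first;
Bitmap second = BitmapFactory.decodeResource(getResources(), R.drawable.sample, options);
// When reuse succeeds, reinitBitmap() runs and the very same Java object comes back.
Log.d("TAG", "reused=" + (second == first));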
After the analysis above, a question may come to mind: tryAllocPixels is called twice, so if we load a (1440x2560) image of roughly 10 MB, does that mean we need 20 MB of memory?
A friendly hint: keep in mind that there is Java memory and there is Native memory.
2. How a Bitmap's memory is allocated
Bitmap memory allocation differs slightly between versions: from 3.0 to 7.0 the bitmap pixel data is stored on the Java heap, while from 8.0 onward it lives on the Native heap. You might wonder what difference that makes, so let's look at a simple example:
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
logMemory();
Bitmap bitmap = Bitmap.createBitmap(1024, 1024 * 500, Bitmap.Config.ARGB_8888);
logMemory();
}
private void logMemory() {
ActivityManager activityManager = (ActivityManager) getSystemService(Context.ACTIVITY_SERVICE);
ActivityManager.MemoryInfo memoryInfo = new ActivityManager.MemoryInfo();
activityManager.getMemoryInfo(memoryInfo);
Log.e("TAG", "AvailMem :" + memoryInfo.availMem / 1024 / 1024);
Log.e("TAG", "lowMemory:" + memoryInfo.lowMemory);
Log.e("TAG", "NativeHeapAllocatedSize :" + Debug.getNativeHeapAllocatedSize() / 1024 / 1024);
}

Here we create a bitmap roughly 2 GB in size (1024 * 512000 * 4 bytes). Running this on a version below 8.0 will OOM, while on 8.0 and above it runs without any problem — the Native heap simply grows by about 2 GB.
E/TAG: AvailMem :1654
E/TAG: lowMemory:false
E/TAG: NativeHeapAllocatedSize :4
E/TAG: AvailMem :1656
E/TAG: lowMemory:false
E/TAG: NativeHeapAllocatedSize :2052
From the earlier source analysis we know that a bitmap's memory is allocated through tryAllocPixels. Let's compare the implementations and see how they differ between versions, starting with the 7.0 code:
/frameworks/base/core/jni/android/graphics/Bitmap.cpp
bool SkBitmap::tryAllocPixels(Allocator *allocator, SkColorTable *ctable) {
    HeapAllocator stdalloc;
    if (nullptr == allocator) {
        allocator = &stdalloc;
    }
    return allocator->allocPixelRef(this, ctable);
}

bool JavaPixelAllocator::allocPixelRef(SkBitmap *bitmap, SkColorTable *ctable) {
    JNIEnv *env = vm2env(mJavaVM);
    mStorage = GraphicsJNI::allocateJavaPixelRef(env, bitmap, ctable);
    return mStorage != nullptr;
}

android::Bitmap *GraphicsJNI::allocateJavaPixelRef(JNIEnv *env, SkBitmap *bitmap,
        SkColorTable *ctable) {
    const SkImageInfo &info = bitmap->info();
    if (info.colorType() == kUnknown_SkColorType) {
        doThrowIAE(env, "unknown bitmap configuration");
        return NULL;
    }

    size_t size;
    if (!computeAllocationSize(*bitmap, &size)) {
        return NULL;
    }

    // we must respect the rowBytes value already set on the bitmap instead of
    // attempting to compute our own.
    const size_t rowBytes = bitmap->rowBytes();

    jbyteArray arrayObj = (jbyteArray) env->CallObjectMethod(gVMRuntime,
                                                             gVMRuntime_newNonMovableArray,
                                                             gByte_class, size);
    if (env->ExceptionCheck() != 0) {
        return NULL;
    }
    SkASSERT(arrayObj);
    jbyte *addr = (jbyte *) env->CallLongMethod(gVMRuntime, gVMRuntime_addressOf, arrayObj);
    if (env->ExceptionCheck() != 0) {
        return NULL;
    }
    SkASSERT(addr);
    android::Bitmap *wrapper = new android::Bitmap(env, arrayObj, (void *) addr, info, rowBytes,
            ctable);
    wrapper->getSkBitmap(bitmap);
    // since we're already allocated, we lockPixels right away
    // HeapAllocator behaves this way too
    bitmap->lockPixels();
    return wrapper;
}

The key line above is new android::Bitmap, whose constructor lives in:
frameworks/base/core/jni/android/graphics/Bitmap.cpp
Bitmap::Bitmap(JNIEnv *env, jbyteArray storageObj, void *address, const SkImageInfo &info, size_t rowBytes, SkColorTable *ctable)
: mPixelStorageType(PixelStorageType::Java) {
env->GetJavaVM(&mPixelStorage.java.jvm);
mPixelStorage.java.jweakRef = env->NewWeakGlobalRef(storageObj);
mPixelStorage.java.jstrongRef = nullptr;
    mPixelRef.reset(new WrappedPixelRef(this, address, info, rowBytes, ctable));
    // Note: this will trigger a call to onStrongRefDestroyed(), but
// we want the pixel ref to have a ref count of 0 at this point
mPixelRef->unref();
}

Here address is the address of arrayObj, and arrayObj is a jbyteArray. In other words, we cross from the JNI world into the Java world to allocate the memory — much like Zygote enters the Java world by calling the main function of com.android.internal.os.ZygoteInit through JNI. If we keep following gVMRuntime_newNonMovableArray into its implementation, we end up allocating on runtime->GetHeap(), i.e. on the Java heap. I won't paste that code here.
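A quick way to see for yourself which heap the pixels land in (a minimal sketch; run it once on a 7.x device and once on an 8.0+ device and compare the two deltas):

Runtime rt = Runtime.getRuntime();
long javaUsedBefore = rt.totalMemory() - rt.freeMemory();
long nativeUsedBefore = Debug.getNativeHeapAllocatedSize();

Bitmap bitmap = Bitmap.createBitmap(1024, 1024, Bitmap.Config.ARGB_8888); // ~4 MB of pixels

long javaDelta = (rt.totalMemory() - rt.freeMemory()) - javaUsedBefore;
long nativeDelta = Debug.getNativeHeapAllocatedSize() - nativeUsedBefore;
// On 3.0 - 7.x the ~4 MB should show up in javaDelta; on 8.0+ it shows up in nativeDelta.
Log.d("TAG", "java += " + javaDelta / 1024 + " KB, native += " + nativeDelta / 1024 + " KB");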
Now let's look at the 8.0 source and see how it differs from 7.0:
external/skia/src/core/SkBitmap.cpp
bool SkBitmap::tryAllocPixels(Allocator *allocator, SkColorTable *ctable) {
    HeapAllocator stdalloc;
    if (nullptr == allocator) {
        allocator = &stdalloc;
    }
    return allocator->allocPixelRef(this, ctable);
}

bool HeapAllocator::allocPixelRef(SkBitmap *bitmap, SkColorTable *ctable) {
    mStorage = android::Bitmap::allocateHeapBitmap(bitmap, ctable);
    return !!mStorage;
}
Author: 红橙Darren
Link: https://www.jianshu.com/p/8e8ad414237e