Commit e137db85 authored by bsalomon, committed by Commit bot

Revert of Move npot resizing out of GrContext and simplify GrContext texture functions. (patchset #10 id:200001 of https://codereview.chromium.org/882223003/)

Reason for revert:
The perf fix didn't fix the Chromium WebGL conformance tests.

Original issue's description:
> Move npot resizing out of GrContext and simplify GrContext texture functions.
>
> Committed: https://skia.googlesource.com/skia/+/8a8100349105c8c6de39fcb34e47679da7a67f54
>
> Committed: https://skia.googlesource.com/skia/+/6c96672491b04cb782bce8fee778124df66524a0

TBR=robertphillips@google.com
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/887303002
parent 8a4527e9
......@@ -169,10 +169,10 @@ public:
*/
void purgeAllUnlockedResources();
/** Sets a content key on the resource. The resource must not already have a content key and
* the key must not already be in use for this to succeed.
/**
* Stores a custom resource in the cache, based on the specified key.
*/
bool addResourceToCache(const GrContentKey&, GrGpuResource*);
void addResourceToCache(const GrContentKey&, GrGpuResource*);
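Taken with the signature change above, the pre-revert pattern paired an unkeyed createTexture() with an explicit, checked cache insertion; a minimal sketch of that pattern as it appears in the blur code further down (texDesc, key, and pixels are assumed to be set up by the caller):

GrTexture* tex = context->createTexture(texDesc, pixels, 0);
if (tex) {
    // bool variant: fails if the key is taken or the resource already has one
    SkAssertResult(context->addResourceToCache(key, tex));
}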
/**
* Finds a resource in the cache, based on the specified key. This is intended for use in
......@@ -181,25 +181,6 @@ public:
*/
GrGpuResource* findAndRefCachedResource(const GrContentKey&);
/** Helper for casting resource to a texture. Caller must be sure that the resource cached
with the key is either NULL or a texture and not another resource type. */
GrTexture* findAndRefCachedTexture(const GrContentKey& key) {
GrGpuResource* resource = this->findAndRefCachedResource(key);
if (resource) {
GrTexture* texture = static_cast<GrSurface*>(resource)->asTexture();
SkASSERT(texture);
return texture;
}
return NULL;
}
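The helper above wraps findAndRefCachedResource() and downcasts the result; a minimal caller sketch (the draw step is left abstract):

GrTexture* tex = context->findAndRefCachedTexture(key);
if (tex) {
    // ... draw with tex ...
    tex->unref();  // balance the ref taken by the lookup
}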
/**
* Determines whether a resource is in the cache. If the resource is found it
* will not be locked or returned. This call does not affect the priority of
* the resource for deletion.
*/
bool isResourceInCache(const GrContentKey& key) const;
/**
* Creates a new text rendering context that is optimal for the
* render target and the context. Caller assumes the ownership
......@@ -214,28 +195,57 @@ public:
// Textures
/**
* Creates a new texture in the resource cache and returns it. The caller owns a
* Creates a new entry, based on the specified key and texture and returns it. The caller owns a
* ref on the returned texture which must be balanced by a call to unref.
*
* TODO: Move resizing logic out of GrContext and have the caller set the content key on the
* returned texture rather than take it as a param.
*
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
* @param desc Description of the texture properties.
* @param key Key to associate with the texture.
* @param srcData Pointer to the pixel values.
* @param rowBytes The number of bytes between rows of the texture. Zero
* implies tightly packed rows. For compressed pixel configs, this
* field is ignored.
*/
GrTexture* createTexture(const GrSurfaceDesc& desc, const void* srcData, size_t rowBytes);
GrTexture* createTexture(const GrSurfaceDesc& desc) {
return this->createTexture(desc, NULL, 0);
}
/**
* Creates a texture that is outside the cache. Does not count against
* cache's budget.
* @param outKey (optional) If non-NULL, we'll write the cache key we used to outKey. This
* may differ from key on GPUs that don't support tiling NPOT textures.
*/
GrTexture* createTexture(const GrTextureParams* params,
const GrSurfaceDesc& desc,
const GrContentKey& key,
const void* srcData,
size_t rowBytes,
GrContentKey* outKey = NULL);
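A hedged usage sketch of the restored keyed createTexture(), assuming the Skia headers at this revision; upload_tiled_a8 and its parameter choices are illustrative only:

static GrTexture* upload_tiled_a8(GrContext* ctx, const GrContentKey& key,
                                  const void* pixels, int w, int h) {
    GrSurfaceDesc desc;
    desc.fWidth = w;
    desc.fHeight = h;
    desc.fConfig = kAlpha_8_GrPixelConfig;
    // Tiled, filtered params may force an NPOT->POT resize on limited
    // hardware; the key actually used is then written to usedKey.
    GrTextureParams params(SkShader::kRepeat_TileMode,
                           GrTextureParams::kBilerp_FilterMode);
    GrContentKey usedKey;
    GrTexture* tex = ctx->createTexture(&params, desc, key, pixels,
                                        0 /* tightly packed rows */, &usedKey);
    return tex;  // may be NULL; on success the caller owns a ref to unref()
}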
/**
* Search for an entry based on key and dimensions. If found, ref it and return it. The return
* value will be NULL if not found. The caller must balance with a call to unref.
*
* TODO: Add a budgeted param to createTexture and remove this function.
* TODO: Remove this function and do lookups generically.
*
* @param desc Description of the texture properties.
* @param key key to use for texture look up.
* @param params The texture params used to draw a texture may help determine
* the cache entry used. (e.g. different versions may exist
* for different wrap modes on GPUs with limited NPOT
* texture support). NULL implies clamp wrap modes.
*/
GrTexture* findAndRefTexture(const GrSurfaceDesc& desc,
const GrContentKey& key,
const GrTextureParams* params);
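Lookups must pass the same desc/key/params triple as the create call so that the derived resize key matches; the find-or-create pattern used throughout this diff, sketched (pixels is hypothetical):

GrTexture* tex = ctx->findAndRefTexture(desc, key, &params);
if (NULL == tex) {
    tex = ctx->createTexture(&params, desc, key, pixels, 0);
}
// ... use tex, then unref() ...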
/**
* Determines whether a texture is in the cache. If the texture is found it
* will not be locked or returned. This call does not affect the priority of
* the texture for deletion.
*
* TODO: Remove this function and do cache checks generically.
*/
GrTexture* createUncachedTexture(const GrSurfaceDesc& desc, void* srcData, size_t rowBytes);
bool isTextureInCache(const GrSurfaceDesc& desc,
const GrContentKey& key,
const GrTextureParams* params) const;
/**
* Enum that determines how closely a returned scratch texture must match
......@@ -274,9 +284,26 @@ public:
bool internalFlag = false);
/**
* Returns true if index8 textures are supported.
* Creates a texture that is outside the cache. Does not count against
* cache's budget.
*
* Textures created by createTexture() hide the complications of
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). NPOT uncached textures are not tilable on such APIs.
*/
GrTexture* createUncachedTexture(const GrSurfaceDesc& desc,
void* srcData,
size_t rowBytes);
/**
* Returns true if the specified use of an indexed texture is supported.
* Support may depend upon whether the texture params indicate that the
* texture will be tiled. Passing NULL for the texture params indicates
* clamp mode.
*/
bool supportsIndex8PixelConfig() const;
bool supportsIndex8PixelConfig(const GrTextureParams*,
int width,
int height) const;
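As the SkGr.cpp hunk further down shows, this check gates the palette-texture upload path; a hedged sketch (bm and params are supplied by the caller):

if (ctx->supportsIndex8PixelConfig(&params, bm.width(), bm.height())) {
    // upload the index8 data directly
} else {
    // copy to kN32_SkColorType first, then upload as 32-bit
}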
/**
* Return the max width or height of a texture supported by the current GPU.
......
......@@ -25,15 +25,23 @@
* created by subclassing GrProcessor.
*
* The primitive color computation starts with the color specified by setColor(). This color is the
* input to the first color stage. Each color stage feeds its output to the next color stage.
* input to the first color stage. Each color stage feeds its output to the next color stage. The
* final color stage's output color is input to the color filter specified by
* setXfermodeColorFilter which produces the final source color, S.
*
* Fractional pixel coverage follows a similar flow. The coverage is initially the value specified
* by setCoverage(). This is input to the first coverage stage. Coverage stages are chained
* together in the same manner as color stages. The output of the last stage is modulated by any
* fractional coverage produced by anti-aliasing. This last step produces the final coverage, C.
*
* setXPFactory is used to control blending between the output color and dest. It also implements
* the application of fractional coverage from the coverage pipeline.
* setBlendFunc() specifies blending coefficients for S (described above) and D, the initial value
* of the destination pixel, labeled Bs and Bd respectively. The final value of the destination
* pixel is then D' = (1-C)*D + C*(Bd*D + Bs*S).
*
* Note that the coverage is applied after the blend. This is why they are computed as distinct
* values.
*
* TODO: Encapsulate setXfermodeColorFilter in a GrProcessor and remove from GrPaint.
*/
class GrPaint {
public:
......
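A standalone sketch of the blend-then-coverage math described above, for a single color channel in [0,1]; the src-over coefficients and all names are illustrative, not Skia API:

#include <cstdio>

static float final_dst(float S, float D, float Bs, float Bd, float C) {
    float blended = Bd * D + Bs * S;   // blend source against dest first...
    return (1 - C) * D + C * blended;  // ...then lerp by fractional coverage C
}

int main() {
    // Treat S as premultiplied alpha; src-over uses Bs = 1, Bd = 1 - S.
    float S = 0.5f, D = 0.25f;
    float Bs = 1.0f, Bd = 1.0f - S;
    printf("C=1.0 -> D' = %.4f\n", final_dst(S, D, Bs, Bd, 1.0f));  // 0.6250
    printf("C=0.5 -> D' = %.4f\n", final_dst(S, D, Bs, Bd, 0.5f));  // 0.4375
    return 0;
}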
......@@ -220,7 +220,6 @@ public:
Builder(GrContentKey* key, const GrContentKey& innerKey, Domain domain,
int extraData32Cnt)
: INHERITED::Builder(key, domain, Data32CntForInnerKey(innerKey) + extraData32Cnt) {
SkASSERT(&innerKey != key);
// add the inner key to the end of the key so that op[] can be indexed normally.
uint32_t* innerKeyData = &this->operator[](extraData32Cnt);
const uint32_t* srcData = innerKey.data();
......
......@@ -302,22 +302,6 @@ static inline bool GrPixelConfigIsCompressed(GrPixelConfig config) {
}
}
/** If the pixel config is compressed, return an equivalent uncompressed format. */
static inline GrPixelConfig GrMakePixelConfigUncompressed(GrPixelConfig config) {
switch (config) {
case kIndex_8_GrPixelConfig:
case kETC1_GrPixelConfig:
case kASTC_12x12_GrPixelConfig:
return kRGBA_8888_GrPixelConfig;
case kLATC_GrPixelConfig:
case kR11_EAC_GrPixelConfig:
return kAlpha_8_GrPixelConfig;
default:
SkASSERT(!GrPixelConfigIsCompressed(config));
return config;
}
}
// Returns true if the pixel config is 32 bits per pixel
static inline bool GrPixelConfigIs8888(GrPixelConfig config) {
switch (config) {
......
......@@ -750,6 +750,7 @@ void GrGLRectBlurEffect::setData(const GrGLProgramDataManager& pdman,
bool GrRectBlurEffect::CreateBlurProfileTexture(GrContext *context, float sigma,
GrTexture **blurProfileTexture) {
GrTextureParams params;
GrSurfaceDesc texDesc;
unsigned int profileSize = SkScalarCeilToInt(6*sigma);
......@@ -767,19 +768,18 @@ bool GrRectBlurEffect::CreateBlurProfileTexture(GrContext *context, float sigma,
uint8_t *profile = NULL;
SkAutoTDeleteArray<uint8_t> ada(NULL);
*blurProfileTexture = context->findAndRefCachedTexture(key);
*blurProfileTexture = context->findAndRefTexture(texDesc, key, &params);
if (NULL == *blurProfileTexture) {
SkBlurMask::ComputeBlurProfile(sigma, &profile);
ada.reset(profile);
*blurProfileTexture = context->createTexture(texDesc, profile, 0);
*blurProfileTexture = context->createTexture(&params, texDesc, key, profile, 0);
if (NULL == *blurProfileTexture) {
return false;
}
SkAssertResult(context->addResourceToCache(key, *blurProfileTexture));
}
return true;
......@@ -925,13 +925,21 @@ GrFragmentProcessor* GrRRectBlurEffect::Create(GrContext* context, float sigma,
builder[1] = cornerRadius;
builder.finish();
GrTexture *blurNinePatchTexture = context->findAndRefCachedTexture(key);
GrTextureParams params;
params.setFilterMode(GrTextureParams::kBilerp_FilterMode);
unsigned int smallRectSide = 2*(blurRadius + cornerRadius) + 1;
unsigned int texSide = smallRectSide + 2*blurRadius;
GrSurfaceDesc texDesc;
texDesc.fWidth = texSide;
texDesc.fHeight = texSide;
texDesc.fConfig = kAlpha_8_GrPixelConfig;
GrTexture *blurNinePatchTexture = context->findAndRefTexture(texDesc, key, &params);
if (NULL == blurNinePatchTexture) {
SkMask mask;
unsigned int smallRectSide = 2*(blurRadius + cornerRadius) + 1;
mask.fBounds = SkIRect::MakeWH(smallRectSide, smallRectSide);
mask.fFormat = SkMask::kA8_Format;
mask.fRowBytes = mask.fBounds.width();
......@@ -949,22 +957,12 @@ GrFragmentProcessor* GrRRectBlurEffect::Create(GrContext* context, float sigma,
SkPath path;
path.addRRect( smallRRect );
SkDraw::DrawToMask(path, &mask.fBounds, NULL, NULL, &mask,
SkMask::kJustRenderImage_CreateMode, SkPaint::kFill_Style);
SkDraw::DrawToMask(path, &mask.fBounds, NULL, NULL, &mask, SkMask::kJustRenderImage_CreateMode, SkPaint::kFill_Style);
SkMask blurred_mask;
SkBlurMask::BoxBlur(&blurred_mask, mask, sigma, kNormal_SkBlurStyle, kHigh_SkBlurQuality,
NULL, true );
unsigned int texSide = smallRectSide + 2*blurRadius;
GrSurfaceDesc texDesc;
texDesc.fWidth = texSide;
texDesc.fHeight = texSide;
texDesc.fConfig = kAlpha_8_GrPixelConfig;
blurNinePatchTexture = context->createTexture(texDesc, blurred_mask.fImage, 0);
SkAssertResult(context->addResourceToCache(key, blurNinePatchTexture));
SkBlurMask::BoxBlur(&blurred_mask, mask, sigma, kNormal_SkBlurStyle, kHigh_SkBlurQuality, NULL, true );
blurNinePatchTexture = context->createTexture(&params, texDesc, key, blurred_mask.fImage, 0);
SkMask::FreeImage(blurred_mask.fImage);
}
......
......@@ -354,12 +354,12 @@ GrFragmentProcessor* SkColorCubeFilter::asFragmentProcessor(GrContext* context)
desc.fHeight = fCache.cubeDimension() * fCache.cubeDimension();
desc.fConfig = kRGBA_8888_GrPixelConfig;
SkAutoTUnref<GrTexture> textureCube(context->findAndRefCachedTexture(key));
if (!textureCube) {
textureCube.reset(context->createTexture(desc, fCubeData->data(), 0));
if (textureCube) {
SkAssertResult(context->addResourceToCache(key, textureCube));
}
GrSurface* surface = static_cast<GrSurface*>(context->findAndRefCachedResource(key));
SkAutoTUnref<GrTexture> textureCube;
if (surface) {
textureCube.reset(surface->asTexture());
} else {
textureCube.reset(context->createTexture(NULL, desc, key, fCubeData->data(), 0));
}
return textureCube ? GrColorCubeEffect::Create(textureCube) : NULL;
......
......@@ -223,9 +223,219 @@ GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
////////////////////////////////////////////////////////////////////////////////
GrTexture* GrContext::createTexture(const GrSurfaceDesc& desc, const void* srcData,
size_t rowBytes) {
return fGpu->createTexture(desc, true, srcData, rowBytes);
static void stretch_image(void* dst,
int dstW,
int dstH,
const void* src,
int srcW,
int srcH,
size_t bpp) {
SkFixed dx = (srcW << 16) / dstW;
SkFixed dy = (srcH << 16) / dstH;
SkFixed y = dy >> 1;
size_t dstXLimit = dstW*bpp;
for (int j = 0; j < dstH; ++j) {
SkFixed x = dx >> 1;
const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
for (size_t i = 0; i < dstXLimit; i += bpp) {
memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
x += dx;
}
y += dy;
}
}
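stretch_image() walks the destination in 16.16 fixed point, sampling the nearest source texel; a self-contained check of that stepping (SkFixed spelled as a plain int32_t) on a 2x2 to 4x4 stretch:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Same logic as stretch_image above, made standalone.
static void stretch_nearest(void* dst, int dstW, int dstH,
                            const void* src, int srcW, int srcH, size_t bpp) {
    int32_t dx = (srcW << 16) / dstW;
    int32_t dy = (srcH << 16) / dstH;
    int32_t y = dy >> 1;  // start half a step in: sample texel centers
    for (int j = 0; j < dstH; ++j) {
        int32_t x = dx >> 1;
        const uint8_t* srcRow = (const uint8_t*)src + (y >> 16) * srcW * bpp;
        uint8_t* dstRow = (uint8_t*)dst + j * dstW * bpp;
        for (size_t i = 0; i < dstW * bpp; i += bpp) {
            memcpy(dstRow + i, srcRow + (x >> 16) * bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

int main() {
    uint32_t src[] = {1, 2, 3, 4};  // 2x2 image, 4 bytes per "pixel"
    uint32_t dst[16];
    stretch_nearest(dst, 4, 4, src, 2, 2, sizeof(uint32_t));
    for (int j = 0; j < 4; ++j)  // expect rows: 1 1 2 2 / 1 1 2 2 / 3 3 4 4 / 3 3 4 4
        printf("%u %u %u %u\n", dst[4*j], dst[4*j+1], dst[4*j+2], dst[4*j+3]);
    return 0;
}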
enum ResizeFlags {
/**
* The kStretchToPOT bit is set when the texture is NPOT and is being repeated or mipped but the
* hardware doesn't support that feature.
*/
kStretchToPOT_ResizeFlag = 0x1,
/**
* The kBilerp bit can only be set when the kStretchToPOT flag is set and indicates whether the
* stretched texture should be bilerped.
*/
kBilerp_ResizeFlag = 0x2,
};
static uint32_t get_texture_flags(const GrGpu* gpu,
const GrTextureParams* params,
const GrSurfaceDesc& desc) {
uint32_t flags = 0;
bool tiled = params && params->isTiled();
if (tiled && !gpu->caps()->npotTextureTileSupport()) {
if (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight)) {
flags |= kStretchToPOT_ResizeFlag;
switch(params->filterMode()) {
case GrTextureParams::kNone_FilterMode:
break;
case GrTextureParams::kBilerp_FilterMode:
case GrTextureParams::kMipMap_FilterMode:
flags |= kBilerp_ResizeFlag;
break;
}
}
}
return flags;
}
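The flag computation reduces to a small truth table (a hypothetical summary, not present in the source):

// tiled?  NPOT-tiling caps?  pow2 dims?  filter mode      -> flags
// no      any                any         any              -> 0
// yes     supported          any         any              -> 0
// yes     unsupported        yes         any              -> 0
// yes     unsupported        no          kNone            -> kStretchToPOT
// yes     unsupported        no          kBilerp/kMipMap  -> kStretchToPOT|kBilerp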
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const void* srcData,
size_t rowBytes,
bool filter) {
SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, origKey, NULL));
if (NULL == clampedTexture) {
clampedTexture.reset(this->createTexture(NULL, desc, origKey, srcData, rowBytes));
if (NULL == clampedTexture) {
return NULL;
}
clampedTexture->cacheAccess().setContentKey(origKey);
}
GrSurfaceDesc rtDesc = desc;
rtDesc.fFlags = rtDesc.fFlags |
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
GrTexture* texture = fGpu->createTexture(rtDesc, true, NULL, 0);
if (texture) {
GrPipelineBuilder pipelineBuilder;
pipelineBuilder.setRenderTarget(texture->asRenderTarget());
// if filtering is not desired then we want to ensure all
// texels in the resampled image are copies of texels from
// the original.
GrTextureParams params(SkShader::kClamp_TileMode,
filter ? GrTextureParams::kBilerp_FilterMode :
GrTextureParams::kNone_FilterMode);
pipelineBuilder.addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);
uint32_t flags = GrDefaultGeoProcFactory::kPosition_GPType |
GrDefaultGeoProcFactory::kLocalCoord_GPType;
SkAutoTUnref<const GrGeometryProcessor> gp(
GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE));
GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, gp->getVertexStride(), 0);
SkASSERT(gp->getVertexStride() == 2 * sizeof(SkPoint));
if (arg.succeeded()) {
SkPoint* verts = (SkPoint*) arg.vertices();
verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
fDrawBuffer->drawNonIndexed(&pipelineBuilder, gp, kTriangleFan_GrPrimitiveType, 0, 4);
} else {
texture->unref();
texture = NULL;
}
} else {
// TODO: Our CPU stretch doesn't filter. But we create separate
// stretched textures when the texture params is either filtered or
// not. Either implement filtered stretch blit on CPU or just create
// one when FBO case fails.
rtDesc.fFlags = kNone_GrSurfaceFlags;
// no longer need to clamp at min RT size.
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
// We shouldn't be resizing a compressed texture.
SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
size_t bpp = GrBytesPerPixel(desc.fConfig);
GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
srcData, desc.fWidth, desc.fHeight, bpp);
size_t stretchedRowBytes = rtDesc.fWidth * bpp;
texture = fGpu->createTexture(rtDesc, true, stretchedPixels.get(), stretchedRowBytes);
SkASSERT(texture);
}
return texture;
}
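Both resize branches round the target dimensions up with GrNextPow2; a standalone sketch of that rounding, where is_pow2 and next_pow2 are stand-ins for SkIsPow2 and GrNextPow2 (v >= 1 assumed):

#include <cstdint>
#include <cstdio>

static bool is_pow2(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }

static uint32_t next_pow2(uint32_t v) {
    // Smallest power of two >= v; returns v when it is already a power of two.
    v -= 1;
    v |= v >> 1;  v |= v >> 2;  v |= v >> 4;  v |= v >> 8;  v |= v >> 16;
    return v + 1;
}

int main() {
    const uint32_t dims[] = {1, 3, 64, 100, 257};
    for (size_t i = 0; i < sizeof(dims)/sizeof(dims[0]); ++i) {
        printf("%4u -> pow2? %d, next %u\n", dims[i], is_pow2(dims[i]), next_pow2(dims[i]));
    }
    return 0;
}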
static GrContentKey::Domain ResizeDomain() {
static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain();
return kDomain;
}
GrTexture* GrContext::createTexture(const GrTextureParams* params,
const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const void* srcData,
size_t rowBytes,
GrContentKey* outKey) {
GrTexture* texture;
uint32_t flags = get_texture_flags(fGpu, params, desc);
SkTCopyOnFirstWrite<GrContentKey> key(origKey);
if (flags) {
// We don't have a code path to resize compressed textures.
if (GrPixelConfigIsCompressed(desc.fConfig)) {
return NULL;
}
texture = this->createResizedTexture(desc, origKey, srcData, rowBytes,
SkToBool(flags & kBilerp_ResizeFlag));
GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
builder[0] = flags;
} else {
texture = fGpu->createTexture(desc, true, srcData, rowBytes);
}
if (texture) {
if (texture->cacheAccess().setContentKey(*key)) {
if (outKey) {
*outKey = *key;
}
} else {
texture->unref();
texture = NULL;
}
}
return texture;
}
GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const GrTextureParams* params) {
uint32_t flags = get_texture_flags(fGpu, params, desc);
SkTCopyOnFirstWrite<GrContentKey> key(origKey);
if (flags) {
GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
builder[0] = flags;
}
GrGpuResource* resource = this->findAndRefCachedResource(*key);
if (resource) {
SkASSERT(static_cast<GrSurface*>(resource)->asTexture());
return static_cast<GrSurface*>(resource)->asTexture();
}
return NULL;
}
bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
const GrContentKey& origKey,
const GrTextureParams* params) const {
uint32_t flags = get_texture_flags(fGpu, params, desc);
SkTCopyOnFirstWrite<GrContentKey> key(origKey);
if (flags) {
GrContentKey::Builder builder(key.writable(), origKey, ResizeDomain(), 1);
builder[0] = flags;
}
return fResourceCache2->hasContentKey(*key);
}
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
......@@ -316,6 +526,19 @@ GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& desc,
return fGpu->createTexture(desc, false, srcData, rowBytes);
}
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
if (maxTextures) {
*maxTextures = fResourceCache2->getMaxResourceCount();
}
if (maxTextureBytes) {
*maxTextureBytes = fResourceCache2->getMaxResourceBytes();
}
}
void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
fResourceCache2->setLimits(maxTextures, maxTextureBytes);
}
int GrContext::getMaxTextureSize() const {
return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}
......@@ -340,9 +563,22 @@ GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDe
///////////////////////////////////////////////////////////////////////////////
bool GrContext::supportsIndex8PixelConfig() const {
bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
int width, int height) const {
const GrDrawTargetCaps* caps = fGpu->caps();
return caps->isConfigTexturable(kIndex_8_GrPixelConfig);
if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
return false;
}
bool isPow2 = SkIsPow2(width) && SkIsPow2(height);
if (!isPow2) {
bool tiled = params && params->isTiled();
if (tiled && !caps->npotTextureTileSupport()) {
return false;
}
}
return true;
}
......@@ -1531,39 +1767,14 @@ const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
}
}
//////////////////////////////////////////////////////////////////////////////
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
if (maxTextures) {
*maxTextures = fResourceCache2->getMaxResourceCount();
}
if (maxTextureBytes) {
*maxTextureBytes = fResourceCache2->getMaxResourceBytes();
}
}
void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
fResourceCache2->setLimits(maxTextures, maxTextureBytes);
}
bool GrContext::addResourceToCache(const GrContentKey& key, GrGpuResource* resource) {
ASSERT_OWNED_RESOURCE(resource);
if (!resource || resource->wasDestroyed()) {
return false;
}
return resource->cacheAccess().setContentKey(key);
}
bool GrContext::isResourceInCache(const GrContentKey& key) const {
return fResourceCache2->hasContentKey(key);
void GrContext::addResourceToCache(const GrContentKey& key, GrGpuResource* resource) {
resource->cacheAccess().setContentKey(key);
}
GrGpuResource* GrContext::findAndRefCachedResource(const GrContentKey& key) {
return fResourceCache2->findAndRefContentResource(key);
}
//////////////////////////////////////////////////////////////////////////////
void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
fGpu->addGpuTraceMarker(marker);
if (fDrawBuffer) {
......
......@@ -9,7 +9,6 @@
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrXferProcessor.h"
#include "SkColorFilter.h"
#include "SkConfig8888.h"
......@@ -87,45 +86,7 @@ static void build_index8_data(void* buffer, const SkBitmap& bitmap) {
////////////////////////////////////////////////////////////////////////////////
enum Stretch {
kNo_Stretch,
kBilerp_Stretch,
kNearest_Stretch
};
static Stretch get_stretch_type(const GrContext* ctx, int width, int height,
const GrTextureParams* params) {
if (params && params->isTiled()) {
const GrDrawTargetCaps* caps = ctx->getGpu()->caps();
if (!caps->npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) {
switch(params->filterMode()) {
case GrTextureParams::kNone_FilterMode:
return kNearest_Stretch;
case GrTextureParams::kBilerp_FilterMode:
case GrTextureParams::kMipMap_FilterMode:
return kBilerp_Stretch;
}
}
}
return kNo_Stretch;
}
static bool make_resize_key(const GrContentKey& origKey, Stretch stretch, GrContentKey* resizeKey) {
if (origKey.isValid() && kNo_Stretch != stretch) {
static const GrContentKey::Domain kDomain = GrContentKey::GenerateDomain();
GrContentKey::Builder builder(resizeKey, origKey, kDomain, 1);
builder[0] = stretch;
builder.finish();
return true;
}
SkASSERT(!resizeKey->isValid());
return false;
}
static void generate_bitmap_keys(const SkBitmap& bitmap,
Stretch stretch,
GrContentKey* key,
GrContentKey* resizedKey) {
static void generate_bitmap_key(const SkBitmap& bitmap, GrContentKey* key) {
// Our id includes the offset, width, and height so that bitmaps created by extractSubset()
// are unique.
uint32_t genID = bitmap.getGenerationID();
......@@ -139,11 +100,6 @@ static void generate_bitmap_keys(const SkBitmap& bitmap,
builder[1] = origin.fX;
builder[2] = origin.fY;
builder[3] = width | (height << 16);
builder.finish();
if (kNo_Stretch != stretch) {
make_resize_key(*key, stretch, resizedKey);
}
}
static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrSurfaceDesc* desc) {
......@@ -171,106 +127,45 @@ private:
} // namespace
#if 0 // TODO: plug this back up
static void add_genID_listener(const GrContentKey& key, SkPixelRef* pixelRef) {
SkASSERT(pixelRef);
pixelRef->addGenIDChangeListener(SkNEW_ARGS(GrResourceInvalidator, (key)));
}
#endif
// creates a new texture that is the input texture scaled up to the next power of two in
// width or height. If optionalKey is valid it will be set on the new texture. stretch
// controls whether the scaling is done using nearest or bilerp filtering.
GrTexture* resize_texture(GrTexture* inputTexture, Stretch stretch,
const GrContentKey& optionalKey) {
SkASSERT(kNo_Stretch != stretch);
GrContext* context = inputTexture->getContext();
SkASSERT(context);
// Either it's a cache miss or the original wasn't cached to begin with.
GrSurfaceDesc rtDesc = inputTexture->desc();
rtDesc.fFlags = rtDesc.fFlags |
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
rtDesc.fWidth = GrNextPow2(rtDesc.fWidth);
rtDesc.fHeight = GrNextPow2(rtDesc.fHeight);
rtDesc.fConfig = GrMakePixelConfigUncompressed(rtDesc.fConfig);
// If the config isn't renderable try converting to either A8 or an 32 bit config. Otherwise,
// fail.
if (!context->isConfigRenderable(rtDesc.fConfig, false)) {
if (GrPixelConfigIsAlphaOnly(rtDesc.fConfig)) {
if (context->isConfigRenderable(kAlpha_8_GrPixelConfig, false)) {
rtDesc.fConfig = kAlpha_8_GrPixelConfig;
} else if (context->isConfigRenderable(kSkia8888_GrPixelConfig, false)) {
rtDesc.fConfig = kSkia8888_GrPixelConfig;
} else {
return NULL;
}
} else if (kRGB_GrColorComponentFlags ==
(kRGB_GrColorComponentFlags & GrPixelConfigComponentMask(rtDesc.fConfig))) {
if (context->isConfigRenderable(kSkia8888_GrPixelConfig, false)) {
rtDesc.fConfig = kSkia8888_GrPixelConfig;
} else {
return NULL;
}
} else {
return NULL;
}
}
GrTexture* resized = context->getGpu()->createTexture(rtDesc, true, NULL, 0);
if (!resized) {
return NULL;
}
GrPaint paint;
// If filtering is not desired then we want to ensure all texels in the resampled image are
// copies of texels from the original.
GrTextureParams params(SkShader::kClamp_TileMode,
kBilerp_Stretch == stretch ? GrTextureParams::kBilerp_FilterMode :
GrTextureParams::kNone_FilterMode);
paint.addColorTextureProcessor(inputTexture, SkMatrix::I(), params);
SkRect rect = SkRect::MakeWH(SkIntToScalar(rtDesc.fWidth), SkIntToScalar(rtDesc.fHeight));
SkRect localRect = SkRect::MakeWH(1.f, 1.f);
GrContext::AutoRenderTarget autoRT(context, resized->asRenderTarget());
GrContext::AutoClip ac(context, GrContext::AutoClip::kWideOpen_InitialClip);
context->drawNonAARectToRect(paint, SkMatrix::I(), rect, localRect);
if (optionalKey.isValid()) {
SkAssertResult(context->addResourceToCache(optionalKey, resized));
}
return resized;
}
static GrTexture* sk_gr_allocate_texture(GrContext* ctx,
const GrContentKey& optionalKey,
bool cache,
const GrTextureParams* params,
const SkBitmap& bm,
GrSurfaceDesc desc,
const void* pixels,
size_t rowBytes) {
GrTexture* result;
if (optionalKey.isValid()) {
result = ctx->createTexture(desc, pixels, rowBytes);
if (cache) {
// This texture is likely to be used again so leave it in the cache
GrContentKey key;
generate_bitmap_key(bm, &key);
result = ctx->createTexture(params, desc, key, pixels, rowBytes, &key);
if (result) {
SkAssertResult(ctx->addResourceToCache(optionalKey, result));
add_genID_listener(key, bm.pixelRef());
}
} else {
} else {
// This texture is unlikely to be used again (in its present form) so
// just use a scratch texture. This will remove the texture from the
// cache so no one else can find it. Additionally, once unlocked, the
// scratch texture will go to the end of the list for purging so will
// likely be available for this volatile bitmap the next time around.
result = ctx->refScratchTexture(desc, GrContext::kExact_ScratchTexMatch);
if (pixels && result) {
result->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, pixels, rowBytes);
if (pixels) {
result->writePixels(0, 0, bm.width(), bm.height(), desc.fConfig, pixels, rowBytes);
}
}
return result;
}
#ifndef SK_IGNORE_ETC1_SUPPORT
static GrTexture *load_etc1_texture(GrContext* ctx, const GrContentKey& optionalKey,
static GrTexture *load_etc1_texture(GrContext* ctx, bool cache,
const GrTextureParams* params,
const SkBitmap &bm, GrSurfaceDesc desc) {
SkAutoTUnref<SkData> data(bm.pixelRef()->refEncodedData());
......@@ -315,11 +210,11 @@ static GrTexture *load_etc1_texture(GrContext* ctx, const GrContentKey& optional
return NULL;
}
return sk_gr_allocate_texture(ctx, optionalKey, desc, bytes, 0);
return sk_gr_allocate_texture(ctx, cache, params, bm, desc, bytes, 0);
}
#endif // SK_IGNORE_ETC1_SUPPORT
static GrTexture* load_yuv_texture(GrContext* ctx, const GrContentKey& key,
static GrTexture *load_yuv_texture(GrContext* ctx, bool cache, const GrTextureParams* params,
const SkBitmap& bm, const GrSurfaceDesc& desc) {
// Subsets are not supported, the whole pixelRef is loaded when using YUV decoding
SkPixelRef* pixelRef = bm.pixelRef();
......@@ -387,31 +282,30 @@ static GrTexture* load_yuv_texture(GrContext* ctx, const GrContentKey& key,
kRenderTarget_GrSurfaceFlag |
kNoStencil_GrSurfaceFlag;
GrTexture* result = ctx->createTexture(rtDesc, NULL, 0);
if (!result) {
return NULL;
GrTexture* result = sk_gr_allocate_texture(ctx, cache, params, bm, rtDesc, NULL, 0);
GrRenderTarget* renderTarget = result ? result->asRenderTarget() : NULL;
if (renderTarget) {
SkAutoTUnref<GrFragmentProcessor> yuvToRgbProcessor(GrYUVtoRGBEffect::Create(
yuvTextures[0], yuvTextures[1], yuvTextures[2], yuvInfo.fColorSpace));
GrPaint paint;
paint.addColorProcessor(yuvToRgbProcessor);
SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth),
SkIntToScalar(yuvInfo.fSize[0].fHeight));
GrContext::AutoRenderTarget autoRT(ctx, renderTarget);
GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip);
ctx->drawRect(paint, SkMatrix::I(), r);
} else {
SkSafeSetNull(result);
}
GrRenderTarget* renderTarget = result->asRenderTarget();
SkASSERT(renderTarget);
SkAutoTUnref<GrFragmentProcessor>
yuvToRgbProcessor(GrYUVtoRGBEffect::Create(yuvTextures[0], yuvTextures[1], yuvTextures[2],
yuvInfo.fColorSpace));
GrPaint paint;
paint.addColorProcessor(yuvToRgbProcessor);
SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth),
SkIntToScalar(yuvInfo.fSize[0].fHeight));
GrContext::AutoRenderTarget autoRT(ctx, renderTarget);
GrContext::AutoClip ac(ctx, GrContext::AutoClip::kWideOpen_InitialClip);
ctx->drawRect(paint, SkMatrix::I(), r);
return result;
}
static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
const SkBitmap& origBitmap,
const GrContentKey& optionalKey) {
static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
bool cache,
const GrTextureParams* params,
const SkBitmap& origBitmap) {
SkBitmap tmpBitmap;
const SkBitmap* bitmap = &origBitmap;
......@@ -420,7 +314,9 @@ static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
generate_bitmap_texture_desc(*bitmap, &desc);
if (kIndex_8_SkColorType == bitmap->colorType()) {
if (ctx->supportsIndex8PixelConfig()) {
// build_compressed_data doesn't do npot->pot expansion
// and paletted textures can't be sub-updated
if (cache && ctx->supportsIndex8PixelConfig(params, bitmap->width(), bitmap->height())) {
size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
bitmap->width(), bitmap->height());
SkAutoMalloc storage(imageSize);
......@@ -428,7 +324,8 @@ static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
// our compressed data will be trimmed, so pass width() for its
// "rowBytes", since they are the same now.
return sk_gr_allocate_texture(ctx, optionalKey, desc, storage.get(), bitmap->width());
return sk_gr_allocate_texture(ctx, cache, params, origBitmap,
desc, storage.get(), bitmap->width());
} else {
origBitmap.copyTo(&tmpBitmap, kN32_SkColorType);
// now bitmap points to our temp, which has been promoted to 32bits
......@@ -442,7 +339,7 @@ static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
else if (
// We do not support scratch ETC1 textures, hence they should all be at least
// trying to go to the cache.
optionalKey.isValid()
cache
// Make sure that the underlying device supports ETC1 textures before we go ahead
// and check the data.
&& ctx->getGpu()->caps()->isConfigTexturable(kETC1_GrPixelConfig)
......@@ -451,7 +348,7 @@ static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
// the bitmap has available pixels, then they might not be what the decompressed
// data is.
&& !(bitmap->readyToDraw())) {
GrTexture *texture = load_etc1_texture(ctx, optionalKey, *bitmap, desc);
GrTexture *texture = load_etc1_texture(ctx, cache, params, *bitmap, desc);
if (texture) {
return texture;
}
......@@ -459,7 +356,7 @@ static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
#endif // SK_IGNORE_ETC1_SUPPORT
else {
GrTexture *texture = load_yuv_texture(ctx, optionalKey, *bitmap, desc);
GrTexture *texture = load_yuv_texture(ctx, cache, params, *bitmap, desc);
if (texture) {
return texture;
}
......@@ -469,32 +366,8 @@ static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
return NULL;
}
return sk_gr_allocate_texture(ctx, optionalKey, desc, bitmap->getPixels(), bitmap->rowBytes());
}
static GrTexture* create_bitmap_texture(GrContext* ctx,
const SkBitmap& bmp,
Stretch stretch,
const GrContentKey& unstretchedKey,
const GrContentKey& stretchedKey) {
if (kNo_Stretch != stretch) {
SkAutoTUnref<GrTexture> unstretched;
// Check if we have the unstretched version in the cache, if not create it.
if (unstretchedKey.isValid()) {
unstretched.reset(ctx->findAndRefCachedTexture(unstretchedKey));
}
if (!unstretched) {
unstretched.reset(create_unstretched_bitmap_texture(ctx, bmp, unstretchedKey));
if (!unstretched) {
return NULL;
}
}
GrTexture* resized = resize_texture(unstretched, stretch, stretchedKey);
return resized;
}
return create_unstretched_bitmap_texture(ctx, bmp, unstretchedKey);
return sk_gr_allocate_texture(ctx, cache, params, origBitmap, desc,
bitmap->getPixels(), bitmap->rowBytes());
}
static GrTexture* get_texture_backing_bmp(const SkBitmap& bitmap, const GrContext* context,
......@@ -520,23 +393,12 @@ bool GrIsBitmapInCache(const GrContext* ctx,
return true;
}
// We don't cache volatile bitmaps
if (bitmap.isVolatile()) {
return false;
}
// If it is inherently texture backed, consider it in the cache
if (bitmap.getTexture()) {
return true;
}
Stretch stretch = get_stretch_type(ctx, bitmap.width(), bitmap.height(), params);
GrContentKey key, resizedKey;
generate_bitmap_keys(bitmap, stretch, &key, &resizedKey);
GrContentKey key;
generate_bitmap_key(bitmap, &key);
GrSurfaceDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
return ctx->isResourceInCache((kNo_Stretch == stretch) ? key : resizedKey);
return ctx->isTextureInCache(desc, key, params);
}
GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
......@@ -547,29 +409,29 @@ GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
return SkRef(result);
}
Stretch stretch = get_stretch_type(ctx, bitmap.width(), bitmap.height(), params);
GrContentKey key, resizedKey;
bool cache = !bitmap.isVolatile();
if (!bitmap.isVolatile()) {
if (cache) {
// If the bitmap isn't changing try to find a cached copy first.
generate_bitmap_keys(bitmap, stretch, &key, &resizedKey);
result = ctx->findAndRefCachedTexture(resizedKey.isValid() ? resizedKey : key);
if (result) {
return result;
}
}
result = create_bitmap_texture(ctx, bitmap, stretch, key, resizedKey);
if (result) {
return result;
}
GrContentKey key;
generate_bitmap_key(bitmap, &key);
SkDebugf("---- failed to create texture for cache [%d %d]\n",
bitmap.width(), bitmap.height());
GrSurfaceDesc desc;
generate_bitmap_texture_desc(bitmap, &desc);
return NULL;
result = ctx->findAndRefTexture(desc, key, params);
}
if (NULL == result) {
result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap);
}
if (NULL == result) {
SkDebugf("---- failed to create texture for cache [%d %d]\n",
bitmap.width(), bitmap.height());
}
return result;
}
///////////////////////////////////////////////////////////////////////////////
// alphatype is ignore for now, but if GrPixelConfig is expanded to encompass
......
......@@ -190,6 +190,7 @@ GrTextureStripAtlas::AtlasRow* GrTextureStripAtlas::getLRU() {
}
void GrTextureStripAtlas::lockTexture() {
GrTextureParams params;
GrSurfaceDesc texDesc;
texDesc.fWidth = fDesc.fWidth;
texDesc.fHeight = fDesc.fHeight;
......@@ -201,10 +202,9 @@ void GrTextureStripAtlas::lockTexture() {
builder[0] = static_cast<uint32_t>(fCacheKey);
builder.finish();
fTexture = fDesc.fContext->findAndRefCachedTexture(key);
fTexture = fDesc.fContext->findAndRefTexture(texDesc, key, &params);
if (NULL == fTexture) {
fTexture = fDesc.fContext->createTexture(texDesc, NULL, 0);
SkAssertResult(fDesc.fContext->addResourceToCache(key, fTexture));
fTexture = fDesc.fContext->createTexture(&params, texDesc, key, NULL, 0);
// This is a new texture, so all of our cache info is now invalid
this->initLRU();
fKeyTable.rewind();
......
......@@ -114,14 +114,14 @@ static GrRenderTarget* random_render_target(GrContext* context, SkRandom* random
builder[0] = texDesc.fOrigin;
builder.finish();
GrTexture* texture = context->findAndRefCachedTexture(key);
SkAutoTUnref<GrTexture> texture(context->findAndRefTexture(texDesc, key, &params));
if (!texture) {
texture = context->createTexture(texDesc);
if (texture) {
SkAssertResult(context->addResourceToCache(key, texture));
texture.reset(context->createTexture(&params, texDesc, key, 0, 0));
if (!texture) {
return NULL;
}
}
return texture ? texture->asRenderTarget() : NULL;
return SkRef(texture->asRenderTarget());
}
static void set_random_xpf(GrContext* context, const GrDrawTargetCaps& caps,
......