# Cached full-text search across blogs, pages, photos and comments,
# plus the paginated results view.
search_cache = get_cache('locmem:///')
# NOTE(review): the original comment claimed "5 min" but the value is
# 5 hours. Value kept as-is to preserve behavior; if 5 minutes was the
# intent, change this to 60 * 5.
_CACHE_TIMEOUT = 60 * 60 * 5  # 5 hours


def search_cacheable(lang, query):
    """Return combined search results for `query` restricted to `lang`.

    Results are memoised in the local-memory cache under the URL-quoted
    `lang + query` string for _CACHE_TIMEOUT seconds, so repeated
    identical searches skip the database entirely.
    """
    key = urllib.quote(lang + query)
    # Single cache read. The original called .get() twice and used a
    # truthiness test, so an empty (cached) result list always looked
    # like a miss and re-ran all four queries on every request.
    rslt = search_cache.get(key)
    if rslt is None:
        results4blogs = Entry.objects.all()
        results4pages = Page.objects.filter(language__exact=lang)
        results4photos = Photo.objects.all()
        results4comments = Comment.objects.all()
        # AND semantics: every whitespace-separated term must match;
        # each .filter() call narrows the queryset further.
        for term in query.split():
            results4blogs = results4blogs.filter(
                Q(headline__icontains=term) | Q(body__icontains=term))
            results4pages = results4pages.filter(
                Q(title__icontains=term) | Q(content__icontains=term))
            results4photos = results4photos.filter(
                Q(title__icontains=term))
            results4comments = results4comments.filter(
                Q(headline__icontains=term) | Q(comment__icontains=term))
        # Union of the four result sets, de-duplicated via set().
        rslt = list(set(results4blogs) | set(results4pages) |
                    set(results4photos) | set(results4comments))
        search_cache.set(key, rslt, _CACHE_TIMEOUT)
        print('cache created for "%s" in %s' % (query, lang.upper()))
    else:
        print('cache hit for "%s" in %s' % (query, lang.upper()))
    return rslt


def paginate_results(request):
    """Render `results.html` with a paginated page of search results.

    Reads `search_query` and `page` from request.GET.
    Raises Http404 when `page` is non-numeric or out of range.
    """
    paginate_by = 5
    orphans = 1
    lang = request.LANGUAGE_CODE
    # A non-numeric ?page= previously raised an unhandled ValueError
    # (HTTP 500); treat it the same as an out-of-range page.
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        raise Http404
    query = urllib.unquote(request.GET.get('search_query', '')).lower()
    results = search_cacheable(lang, query)
    paginator = ObjectPaginator(results, paginate_by, orphans)
    try:
        # ObjectPaginator pages are 0-indexed; the URL param is 1-indexed.
        collection = paginator.get_page(page - 1)
    except InvalidPage:
        raise Http404
    return render_to_response(
        'results.html',
        {'results': collection,
         'query': query,
         'is_paginated': paginator.pages > 1,
         'results_per_page': paginate_by,
         'has_next': paginator.has_next_page(page - 1),
         'has_previous': paginator.has_previous_page(page - 1),
         'page': page,
         'next': page + 1,
         'previous': page - 1,
         'pages': paginator.pages,
         'hits': paginator.hits, },
        context_instance=RequestContext(request),
    )