Source Code for Module gluon.dal

#!/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Thanks to
    * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
    * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
    * Denes
    * Chris Clark
    * clach05
    * Denes Lengyel
    * and many others who have contributed to current and previous versions

This file contains the DAL support for many relational databases,
including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)

Example of usage:

>>> # from dal import DAL, Field

### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)

### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))

### insert a record
>>> id = person.insert(name='James')

### retrieve it by id
>>> james = person(id)

### retrieve it by name
>>> james = person(name='James')

### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]

### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>

### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1

### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0

### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))

### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James

### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1

### delete one record
>>> james.delete_record()
1

### delete (drop) entire database table
>>> person.drop()

Supported field types:
id string text boolean integer double decimal password upload
blob time date datetime

Supported DAL URI strings:
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
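
### connection pooling and lazy tables (a minimal sketch: pool_size and
### lazy_tables are real DAL keyword arguments, the URI is hypothetical)
# db = DAL('postgres://user:pass@localhost/mydb',
#          pool_size=10, lazy_tables=True)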

For more info:
help(DAL)
help(Field)
"""

###################################################################################
# this file only exposes DAL and Field
###################################################################################

__all__ = ['DAL', 'Field']

DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
TIMINGSSIZE = 100
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
DEFAULT_URI = 'sqlite://dummy.db'

import re
import sys
import locale
import os
import types
import datetime
import threading
import time
import csv
import cgi
import copy
import socket
import logging
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob
import traceback
import platform

PYTHON_VERSION = sys.version_info[:3]
if PYTHON_VERSION[0] == 2:
    import cPickle as pickle
    import cStringIO as StringIO
    import copy_reg as copyreg
    hashlib_md5 = hashlib.md5
    bytes, unicode = str, unicode
else:
    import pickle
    from io import StringIO as StringIO
    import copyreg
    long = int
    hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
    bytes, unicode = bytes, str

if PYTHON_VERSION[:2] < (2, 7):
    from gluon.contrib.ordereddict import OrderedDict
else:
    from collections import OrderedDict


CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name','fields',
     'common_filter','polymodel','table_class','on_define','rname'))

SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))

ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join

###################################################################################
# following checks allow the use of dal without web2py, as a standalone module
###################################################################################
try:
    from gluon.utils import web2py_uuid
except (ImportError, SystemError):
    import uuid
    def web2py_uuid(): return str(uuid.uuid4())
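
# Standalone-usage sketch (only assumes this module is importable as
# `dal`; the table and field names below are hypothetical):
#
#     from dal import DAL, Field
#     db = DAL('sqlite://standalone.db', folder='/tmp')
#     db.define_table('thing', Field('name'))
#     db.thing.insert(name='example')
#     db.commit()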

try:
    import portalocker
    have_portalocker = True
except ImportError:
    have_portalocker = False

try:
    from gluon import serializers
    have_serializers = True
except ImportError:
    have_serializers = False
    try:
        import json as simplejson
    except ImportError:
        try:
            import gluon.contrib.simplejson as simplejson
        except ImportError:
            simplejson = None

LOGGER = logging.getLogger("web2py.dal")
DEFAULT = lambda:0

GLOBAL_LOCKER = threading.RLock()
THREAD_LOCAL = threading.local()

# internal representation of tables with field
# <table>.<field>, tables and fields may only be [a-zA-Z0-9_]

REGEX_TYPE = re.compile('^([\w\_\:]+)')
REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*')
REGEX_W = re.compile('^\w+$')
REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.([^.]+)$')
REGEX_NO_GREEDY_ENTITY_NAME = r'(.+?)'
REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$')
REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$')
REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$')
REGEX_QUOTES = re.compile("'[^']*'")
REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
REGEX_PASSWORD = re.compile('\://([^:@]*)\:')
REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)'

# list of drivers will be built on the fly
# and lists only what is available
DRIVERS = []

try:
    from new import classobj
    from google.appengine.ext import db as gae
    from google.appengine.ext import ndb
    from google.appengine.api import namespace_manager, rdbms
    from google.appengine.api.datastore_types import Key  ### for belongs on ID
    from google.appengine.ext.db.polymodel import PolyModel
    from google.appengine.ext.ndb.polymodel import PolyModel as NDBPolyModel
    DRIVERS.append('google')
except ImportError:
    pass

if not 'google' in DRIVERS:

    try:
        from pysqlite2 import dbapi2 as sqlite2
        DRIVERS.append('SQLite(sqlite2)')
    except ImportError:
        LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')

    try:
        from sqlite3 import dbapi2 as sqlite3
        DRIVERS.append('SQLite(sqlite3)')
    except ImportError:
        LOGGER.debug('no SQLite drivers sqlite3')

    try:
        # first try contrib driver, then from site-packages (if installed)
        try:
            import gluon.contrib.pymysql as pymysql
            # monkeypatch pymysql because they haven't fixed the bug:
            # https://github.com/petehunt/PyMySQL/issues/86
            pymysql.ESCAPE_REGEX = re.compile("'")
            pymysql.ESCAPE_MAP = {"'": "''"}
            # end monkeypatch
        except ImportError:
            import pymysql
        DRIVERS.append('MySQL(pymysql)')
    except ImportError:
        LOGGER.debug('no MySQL driver pymysql')

    try:
        import MySQLdb
        DRIVERS.append('MySQL(MySQLdb)')
    except ImportError:
        LOGGER.debug('no MySQL driver MySQLDB')

    try:
        import mysql.connector as mysqlconnector
        DRIVERS.append("MySQL(mysqlconnector)")
    except ImportError:
        LOGGER.debug("no driver mysql.connector")

    try:
        import psycopg2
        from psycopg2.extensions import adapt as psycopg2_adapt
        DRIVERS.append('PostgreSQL(psycopg2)')
    except ImportError:
        LOGGER.debug('no PostgreSQL driver psycopg2')

    try:
        # first try contrib driver, then from site-packages (if installed)
        try:
            import gluon.contrib.pg8000.dbapi as pg8000
        except ImportError:
            import pg8000.dbapi as pg8000
        DRIVERS.append('PostgreSQL(pg8000)')
    except ImportError:
        LOGGER.debug('no PostgreSQL driver pg8000')

    try:
        import cx_Oracle
        DRIVERS.append('Oracle(cx_Oracle)')
    except ImportError:
        LOGGER.debug('no Oracle driver cx_Oracle')

    try:
        try:
            import pyodbc
        except ImportError:
            try:
                import gluon.contrib.pypyodbc as pyodbc
            except Exception, e:
                raise ImportError(str(e))
        DRIVERS.append('MSSQL(pyodbc)')
        DRIVERS.append('DB2(pyodbc)')
        DRIVERS.append('Teradata(pyodbc)')
        DRIVERS.append('Ingres(pyodbc)')
    except ImportError:
        LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc')

    try:
        import Sybase
        DRIVERS.append('Sybase(Sybase)')
    except ImportError:
        LOGGER.debug('no Sybase driver')

    try:
        import kinterbasdb
        DRIVERS.append('Interbase(kinterbasdb)')
        DRIVERS.append('Firebird(kinterbasdb)')
    except ImportError:
        LOGGER.debug('no Firebird/Interbase driver kinterbasdb')

    try:
        import fdb
        DRIVERS.append('Firebird(fdb)')
    except ImportError:
        LOGGER.debug('no Firebird driver fdb')
    #####
    try:
        import firebirdsql
        DRIVERS.append('Firebird(firebirdsql)')
    except ImportError:
        LOGGER.debug('no Firebird driver firebirdsql')

    try:
        import informixdb
        DRIVERS.append('Informix(informixdb)')
        LOGGER.warning('Informix support is experimental')
    except ImportError:
        LOGGER.debug('no Informix driver informixdb')

    try:
        import sapdb
        DRIVERS.append('SQL(sapdb)')
        LOGGER.warning('SAPDB support is experimental')
    except ImportError:
        LOGGER.debug('no SAP driver sapdb')

    try:
        import cubriddb
        DRIVERS.append('Cubrid(cubriddb)')
        LOGGER.warning('Cubrid support is experimental')
    except ImportError:
        LOGGER.debug('no Cubrid driver cubriddb')

    try:
        from com.ziclix.python.sql import zxJDBC
        import java.sql
        # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
        from org.sqlite import JDBC # required by java.sql; ensure we have it
        zxJDBC_sqlite = java.sql.DriverManager
        DRIVERS.append('PostgreSQL(zxJDBC)')
        DRIVERS.append('SQLite(zxJDBC)')
        LOGGER.warning('zxJDBC support is experimental')
        is_jdbc = True
    except ImportError:
        LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
        is_jdbc = False

    try:
        import couchdb
        DRIVERS.append('CouchDB(couchdb)')
    except ImportError:
        LOGGER.debug('no Couchdb driver couchdb')

    try:
        import pymongo
        DRIVERS.append('MongoDB(pymongo)')
    except:
        LOGGER.debug('no MongoDB driver pymongo')

    try:
        import imaplib
        DRIVERS.append('IMAP(imaplib)')
    except:
        LOGGER.debug('no IMAP driver imaplib')

PLURALIZE_RULES = [
    (re.compile('child$'), re.compile('child$'), 'children'),
    (re.compile('oot$'), re.compile('oot$'), 'eet'),
    (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
    (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
    (re.compile('sis$'), re.compile('sis$'), 'ses'),
    (re.compile('man$'), re.compile('man$'), 'men'),
    (re.compile('ife$'), re.compile('ife$'), 'ives'),
    (re.compile('eau$'), re.compile('eau$'), 'eaux'),
    (re.compile('lf$'), re.compile('lf$'), 'lves'),
    (re.compile('[sxz]$'), re.compile('$'), 'es'),
    (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
    (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
    (re.compile('$'), re.compile('$'), 's'),
    ]

def pluralize(singular, rules=PLURALIZE_RULES):
    for line in rules:
        re_search, re_sub, replace = line
        plural = re_search.search(singular) and re_sub.sub(replace, singular)
        if plural: return plural
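
# Behavior sketch (these inputs/outputs follow directly from the rules
# above and are easy to verify by hand):
#
#     pluralize('child')  -> 'children'
#     pluralize('box')    -> 'boxes'     # matches '[sxz]$'
#     pluralize('query')  -> 'queries'   # matches '(qu|[^aeiou])y$'
#     pluralize('table')  -> 'tables'    # falls through to the '$' rule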

def hide_password(uri):
    if isinstance(uri,(list,tuple)):
        return [hide_password(item) for item in uri]
    return REGEX_NOPASSWD.sub('******',uri)
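
# Example (follows from REGEX_NOPASSWD above, which masks everything
# between '//' and '@'):
#
#     hide_password('mysql://a:b@localhost/x')  ->  'mysql:******@localhost/x'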

def OR(a,b):
    return a|b

def AND(a,b):
    return a&b

def IDENTITY(x): return x

def varquote_aux(name,quotestr='%s'):
    return name if REGEX_W.match(name) else quotestr % name
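
# Example (REGEX_W matches plain identifiers, so only non-identifier
# names get wrapped in the quoting template):
#
#     varquote_aux('mytable')           -> 'mytable'
#     varquote_aux('my table', '"%s"')  -> '"my table"'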

def quote_keyword(a,keyword='timestamp'):
    regex = re.compile('\.keyword(?=\w)')
    a = regex.sub('."%s"' % keyword,a)
    return a

if 'google' in DRIVERS:

    is_jdbc = False

    class GAEDecimalProperty(gae.Property):
        """
        GAE decimal implementation
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            super(GAEDecimalProperty, self).__init__(self, **kwargs)
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def get_value_for_datastore(self, model_instance):
            value = super(GAEDecimalProperty, self)\
                .get_value_for_datastore(model_instance)
            if value is None or value == '':
                return None
            else:
                return str(value)

        def make_value_from_datastore(self, value):
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def validate(self, value):
            value = super(GAEDecimalProperty, self).validate(value)
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise gae.BadValueError("Property %s must be a Decimal or string."\
                                    % self.name)

    #TODO Needs more testing
    class NDBDecimalProperty(ndb.StringProperty):
        """
        NDB decimal implementation
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def _to_base_type(self, value):
            if value is None or value == '':
                return None
            else:
                return str(value)

        def _from_base_type(self, value):
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def _validate(self, value):
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise TypeError("Property %s must be a Decimal or string."\
                            % self._name)

###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):

    POOLS = {}
    check_active_connection = True

    @staticmethod
    def set_folder(folder):
        THREAD_LOCAL.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    def close(self,action='commit',really=True):
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        self.connection = None

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        if callable(action):
            action(None)
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL,'folder','')

        if (os.path.isabs(self.folder) and
            isinstance(self, UseDatabaseStoredFile) and
            self.folder.startswith(os.getcwd())):
            self.folder = os.path.relpath(self.folder, os.getcwd())

        # Creating the folder if it does not exist
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)

    def after_connection_hook(self):
        """hook for the after_connection parameter"""
        if callable(self._after_connection):
            self._after_connection(self)
        self.after_connection()

    def after_connection(self):
        """ this is supposed to be overridden by adapters"""
        pass

    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try to pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self,'connection', None) != None:
            return
        if f is None:
            f = self.connector

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
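
    # Pooling sketch: `pool_size` is a real DAL keyword; the URI is
    # hypothetical. close() above parks live connections in POOLS[uri];
    # reconnect() pops one back and probes it with 'SELECT 1;' before
    # reuse, falling back to a fresh connection if the probe fails.
    #
    #     db = DAL('postgres://user:pass@localhost/mydb', pool_size=10)
    #     # at the end of a web2py request, close_all_instances('commit')
    #     # returns the connection to the pool instead of destroying it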

###################################################################################
# metaclass to prepare adapter classes static values
###################################################################################
class AdapterMeta(type):
    """Metaclass to support manipulation of adapter classes.

    At the moment is used to intercept entity_quoting argument passed to DAL.
    """

    def __call__(cls, *args, **kwargs):
        entity_quoting = kwargs.get('entity_quoting', False)
        if 'entity_quoting' in kwargs:
            del kwargs['entity_quoting']

        obj = super(AdapterMeta, cls).__call__(*args, **kwargs)
        if not entity_quoting:
            quot = obj.QUOTE_TEMPLATE = '%s'
            regex_ent = r'(\w+)'
        else:
            quot = obj.QUOTE_TEMPLATE
            regex_ent = REGEX_NO_GREEDY_ENTITY_NAME
        obj.REGEX_TABLE_DOT_FIELD = re.compile(r'^' + \
                                               quot % regex_ent + \
                                               r'\.' + \
                                               quot % regex_ent + \
                                               r'$')

        return obj
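
# Quoting sketch (`entity_quoting` is the real DAL keyword intercepted
# above; the URI is hypothetical). With quoting off, QUOTE_TEMPLATE
# degrades to plain '%s'; with it on, the adapter's own template (the
# BaseAdapter default below is '"%s"') is applied to entity names:
#
#     db = DAL('sqlite://storage.sqlite', entity_quoting=True)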

###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):

    __metaclass__ = AdapterMeta

    native_json = False
    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
    connection = None
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True
    dbpath = None
    folder = None
    connector = lambda *args, **kwargs: None # __init__ should override this

    TRUE = 'T'
    FALSE = 'F'
    T_SEP = ' '
    QUOTE_TEMPLATE = '"%s"'


    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float': 'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
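
    # Rendering sketch (verifiable from the `types` dict above): field
    # types are rendered through this mapping, with %(length)s taken from
    # the Field definition, e.g.
    #
    #     Field('name', 'string', length=64)  ->  CHAR(64)
    #     Field('age', 'integer')             ->  INTEGER
    #
    # Dialect adapters override `types` where the target SQL differs.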

    def isOperationalError(self,exception):
        if not hasattr(self.driver, "OperationalError"):
            return None
        return isinstance(exception, self.driver.OperationalError)

    def isProgrammingError(self,exception):
        if not hasattr(self.driver, "ProgrammingError"):
            return None
        return isinstance(exception, self.driver.ProgrammingError)

    def id_query(self, table):
        pkeys = getattr(table,'_primarykey',None)
        if pkeys:
            return table[pkeys[0]] != None
        else:
            return table._id != None

    def adapt(self, obj):
        return "'%s'" % obj.replace("'", "''")

    def smart_adapt(self, obj):
        if isinstance(obj,(int,float)):
            return str(obj)
        return self.adapt(str(obj))
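
    # Escaping sketch (verifiable from adapt/smart_adapt above):
    #
    #     adapt("it's")     ->  'it''s'   (embedded quotes doubled)
    #     smart_adapt(3.5)  ->  3.5       (numbers pass through unquoted)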

    def file_exists(self, filename):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        return exists(filename)

    def file_open(self, filename, mode='rb', lock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        if have_portalocker and lock:
            fileobj = portalocker.LockedFile(filename,mode)
        else:
            fileobj = open(filename,mode)
        return fileobj

    def file_close(self, fileobj):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        if fileobj:
            fileobj.close()

    def file_delete(self, filename):
        os.unlink(filename)

    def find_driver(self,adapter_args,uri=None):
        self.adapter_args = adapter_args
        if getattr(self,'driver',None) != None:
            return
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
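
    # Driver-selection sketch (both spellings are real DAL features): a
    # specific driver can be requested in the URI scheme or through
    # adapter_args; either path resolves via find_driver() above.
    #
    #     db = DAL('postgres:psycopg2://user:pass@localhost/test')
    #     db = DAL('postgres://user:pass@localhost/test',
    #              adapter_args=dict(driver='pg8000'))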

    def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile','sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)


    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={},do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()


    def sequence_name(self,tablename):
        return self.QUOTE_TEMPLATE % ('%s_sequence' % tablename)

    def trigger_name(self,tablename):
        return '%s_sequence' % tablename

    def varquote(self,name):
        return name

    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                # if not '.' in referenced \
                #         and referenced != tablename \
                #         and hasattr(table,'_primarykey'):
                #     ftype = types['integer']
                #else:
                try:
                    rtable = db[referenced]
                    rfield = rtable._id
                    rfieldname = rfield.name
                    rtablename = referenced
                except (KeyError, ValueError, AttributeError), e:
                    LOGGER.debug('Error: %s' % e)
                    try:
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                    except Exception, e:
                        LOGGER.debug('Error: %s' %e)
                        raise KeyError('Cannot resolve reference %s in %s definition' % (referenced, table._tablename))

                # must be PK reference or unique
                if getattr(rtable, '_primarykey', None) and rfieldname in rtable._primarykey or \
                        rfield.unique:
                    ftype = types[rfield.type[:9]] % \
                        dict(length=rfield.length)
                    # multicolumn primary key reference?
                    if not rfield.unique and len(rtable._primarykey)>1:
                        # then it has to be a table level FK
                        if rtablename not in TFK:
                            TFK[rtablename] = {}
                        TFK[rtablename][rfieldname] = field_name
                    else:
                        ftype = ftype + \
                            types['reference FK'] % dict(
                                constraint_name = constraint_name, # should be quoted
                                foreign_key = rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')',
                                table_name = table.sqlsafe,
                                field_name = field.sqlsafe_name,
                                on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    if referenced in db:
                        id_fieldname = db[referenced]._id.sqlsafe_name
                    elif referenced == tablename:
                        id_fieldname = table._id.sqlsafe_name
                    else: #make a guess
                        id_fieldname = self.QUOTE_TEMPLATE % 'id'
                    #gotcha: the referenced table must be defined before
                    #the referencing one to be able to create the table
                    #Also if it's not recommended, we can still support
                    #references to tablenames without rname to make
                    #migrations and model relationship work also if tables
                    #are not defined in order
                    if referenced == tablename:
                        real_referenced = db[referenced].sqlsafe
                    else:
                        real_referenced = (referenced in db
                                           and db[referenced].sqlsafe
                                           or referenced)
                    rfield = db[referenced]._id
                    ftype = types[field_type[:9]] % dict(
                        index_name = self.QUOTE_TEMPLATE % (field_name+'__idx'),
                        field_name = field.sqlsafe_name,
                        constraint_name = self.QUOTE_TEMPLATE % constraint_name,
                        foreign_key = '%s (%s)' % (real_referenced, rfield.sqlsafe_name),
                        on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                        field_type.startswith('geom')):
                fields.append('%s %s' % (field.sqlsafe_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY (%s)' % (self.QUOTE_TEMPLATE % table._id.name))
            engine = self.adapter_args.get('engine','InnoDB')
            other = ' ENGINE=%s CHARACTER SET utf8;' % engine

        fields = ',\n    '.join(fields)
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = [self.QUOTE_TEMPLATE % pk for pk in db[rtablename]._primarykey]
            fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                    table_name = table.sqlsafe,
                    field_name=', '.join(fkeys),
                    foreign_table = table.sqlsafe,
                    foreign_key = ', '.join(pkeys),
                    on_delete_action = field.ondelete)

        table_rname = table.sqlsafe

        if getattr(table,'_primarykey',None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (table.sqlsafe, fields,
                 self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in table._primarykey])),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (table.sqlsafe, fields, other)

        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(
                    table,
                    sql_fields, sql_fields_old,
                    sql_fields_aux, None,
                    fake_migrate=fake_migrate
                    )
        return query
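
    # Migration sketch (migrate/fake_migrate are the real define_table
    # keywords; the table is hypothetical). The first definition emits the
    # CREATE TABLE built above and pickles sql_fields next to the database;
    # later definitions diff against that file and ALTER only what changed:
    #
    #     db.define_table('person', Field('name', length=64))
    #     db.define_table('person', Field('name', length=64),
    #                     migrate='person.table')  # explicit metadata file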

    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):

        # logfile is deprecated (moved to adapter.log method)
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        def fix(item):
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unknown',sql=v)
            if self.ignore_field_case is not True: return k, v
            return k.lower(),v
        # make sure all field names are lower case to avoid
        # migrations because of case change
        sql_fields = dict(map(fix,sql_fields.iteritems()))
        sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
        if db._debug:
            logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        new_add = self.concat_add(tablename)

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                        sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                             (table.sqlsafe, key,
                              sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if (self.dbengine in ('postgres',) and
                    ftype.startswith('geometry')):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', "+
                              "'%(table)s', '%(field)s');" %
                              dict(schema=schema, table=tablename, field=key,) ]
                elif self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP %s;' %
                             (self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
                else:
                    query = ['ALTER TABLE %s DROP COLUMN %s;' %
                             (self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                    and not (key in table.fields and
                             isinstance(table[key].type, SQLCustomType)) \
                    and not sql_fields[key]['type'].startswith('reference')\
                    and not sql_fields[key]['type'].startswith('double')\
                    and not sql_fields[key]['type'].startswith('id'):
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if self.dbengine in ('firebird',):
                    drop_expr = 'ALTER TABLE %s DROP %s;'
                else:
                    drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
                key_tmp = key + '__tmp'
                query = ['ALTER TABLE %s ADD %s %s;' % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp, tt),
                         'UPDATE %s SET %s=%s;' %
                         (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp, self.QUOTE_TEMPLATE % key),
                         drop_expr % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key),
                         'ALTER TABLE %s ADD %s %s;' %
                         (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key, tt),
                         'UPDATE %s SET %s=%s;' %
                         (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key, self.QUOTE_TEMPLATE % key_tmp),
                         drop_expr % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                self.log('timestamp: %s\n'
                         % datetime.datetime.today().isoformat(), table)
                db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    self.log(sub_query + '\n', table)
                    if fake_migrate:
                        if db._adapter.commit_on_alter_table:
                            self.save_dbt(table,sql_fields_current)
                        self.log('faked!\n', table)
                    else:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird
                        # do not allow multiple alter table
                        # in one transaction so we must commit
                        # partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            self.save_dbt(table,sql_fields_current)
                            self.log('success!\n', table)

            elif metadata_change:
                self.save_dbt(table,sql_fields_current)

        if metadata_change and not (query and db._adapter.commit_on_alter_table):
            db.commit()
            self.save_dbt(table,sql_fields_current)
            self.log('success!\n', table)
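
    # fake_migrate sketch (real DAL/define_table keywords): when the .table
    # metadata and the live schema have drifted apart, this path rewrites
    # the metadata without executing any ALTER statements:
    #
    #     db = DAL('sqlite://storage.sqlite', fake_migrate_all=True)
    #     db.define_table('person', Field('name'), fake_migrate=True)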

    def save_dbt(self,table, sql_fields_current):
        tfile = self.file_open(table._dbt, 'w')
        pickle.dump(sql_fields_current, tfile)
        self.file_close(tfile)

    def LOWER(self, first):
        return 'LOWER(%s)' % self.expand(first)

    def UPPER(self, first):
        return 'UPPER(%s)' % self.expand(first)

    def COUNT(self, first, distinct=None):
        return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \
            % self.expand(first)

    def EXTRACT(self, first, what):
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first))

    def EPOCH(self, first):
        return self.EXTRACT(first, 'epoch')

    def LENGTH(self, first):
        return "LENGTH(%s)" % self.expand(first)

    def AGGREGATE(self, first, what):
        return "%s(%s)" % (what, self.expand(first))

    def JOIN(self):
        return 'JOIN'

    def LEFT_JOIN(self):
        return 'LEFT JOIN'

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)

    def COALESCE(self, first, second):
        expressions = [self.expand(first)]+[self.expand(e) for e in second]
        return 'COALESCE(%s)' % ','.join(expressions)

    def COALESCE_ZERO(self, first):
        return 'COALESCE(%s,0)' % self.expand(first)

    def RAW(self, first):
        return first

    def ALLOW_NULL(self):
        return ''

    def SUBSTRING(self, field, parameters):
        return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self, key):
        return 'PRIMARY KEY(%s)' % key

    def _drop(self, table, mode):
        return ['DROP TABLE %s;' % table.sqlsafe]

    def drop(self, table, mode=''):
        db = table._db
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                self.log(query + '\n', table)
            self.execute(query)
        db.commit()
        del db[table._tablename]
        del db.tables[db.tables.index(table._tablename)]
        db._remove_references_to(table)
        if table._dbt:
            self.file_delete(table._dbt)
            self.log('success!\n', table)

    def _insert(self, table, fields):
        table_rname = table.sqlsafe
        if fields:
            keys = ','.join(f.sqlsafe_name for f, v in fields)
            values = ','.join(self.expand(v, f.type) for f, v in fields)
            return 'INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values)
        else:
            return self._insert_empty(table)
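
    # SQL-building sketch (Table._insert is the real dry-run entry point
    # that lands here; the table is hypothetical). With the default
    # QUOTE_TEMPLATE this returns roughly:
    #
    #     db.person._insert(name='James')
    #     # -> INSERT INTO "person"("name") VALUES ('James');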

    def _insert_empty(self, table):
        return 'INSERT INTO %s DEFAULT VALUES;' % (table.sqlsafe)

    def insert(self, table, fields):
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception:
            e = sys.exc_info()[1]
            if hasattr(table,'_on_insert_error'):
                return table._on_insert_error(table,fields,e)
            raise e
        if hasattr(table, '_primarykey'):
            mydict = dict([(k[0].name, k[1]) for k in fields if k[0].name in table._primarykey])
            if mydict != {}:
                return mydict
        id = self.lastrowid(table)
        if hasattr(table, '_primarykey') and len(table._primarykey) == 1:
            id = {table._primarykey[0]: id}
        if not isinstance(id, (int, long)):
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid

    def bulk_insert(self, table, items):
        return [self.insert(table,item) for item in items]

    def NOT(self, first):
        return '(NOT %s)' % self.expand(first)

    def AND(self, first, second):
        return '(%s AND %s)' % (self.expand(first), self.expand(second))

    def OR(self, first, second):
        return '(%s OR %s)' % (self.expand(first), self.expand(second))

    def BELONGS(self, first, second):
        if isinstance(second, str):
            return '(%s IN (%s))' % (self.expand(first), second[:-1])
        if not second:
            return '(1=0)'
        items = ','.join(self.expand(item, first.type) for item in second)
        return '(%s IN (%s))' % (self.expand(first), items)

    def REGEXP(self, first, second):
        "regular expression operator"
        raise NotImplementedError

    def LIKE(self, first, second):
        "case sensitive like operator"
        raise NotImplementedError

    def ILIKE(self, first, second):
        "case insensitive like operator"
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second, 'string'))

    def STARTSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand(second+'%', 'string'))

    def ENDSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first),
                                 self.expand('%'+second, 'string'))

    def CONTAINS(self,first,second,case_sensitive=False):
        if first.type in ('string','text', 'json'):
            if isinstance(second,Expression):
                second = Expression(None,self.CONCAT('%',Expression(
                    None,self.REPLACE(second,('%','%%'))),'%'))
            else:
                second = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            if isinstance(second,Expression):
                second = Expression(None,self.CONCAT(
                    '%|',Expression(None,self.REPLACE(
                        Expression(None,self.REPLACE(
                            second,('%','%%'))),('|','||'))),'|%'))
            else:
                second = '%|'+str(second).replace('%','%%')\
                    .replace('|','||')+'|%'
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,second)
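
    # Query sketch (contains/startswith are the real Field operators that
    # route here; the table is hypothetical). For list: fields the needle
    # is wrapped in '|' separators to match the stored format:
    #
    #     db(db.person.name.contains('am')).select()   # ... LIKE '%am%'
    #     db(db.person.name.startswith('J')).select()  # ... LIKE 'J%'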
1433
1434 - def EQ(self, first, second=None):
1435 if second is None: 1436 return '(%s IS NULL)' % self.expand(first) 1437 return '(%s = %s)' % (self.expand(first), 1438 self.expand(second, first.type))
1439
1440 - def NE(self, first, second=None):
1441 if second is None: 1442 return '(%s IS NOT NULL)' % self.expand(first) 1443 return '(%s <> %s)' % (self.expand(first), 1444 self.expand(second, first.type))
1445
1446 - def LT(self,first,second=None):
1447 if second is None: 1448 raise RuntimeError("Cannot compare %s < None" % first) 1449 return '(%s < %s)' % (self.expand(first), 1450 self.expand(second,first.type))
1451
1452 - def LE(self,first,second=None):
1453 if second is None: 1454 raise RuntimeError("Cannot compare %s <= None" % first) 1455 return '(%s <= %s)' % (self.expand(first), 1456 self.expand(second,first.type))
1457
1458 - def GT(self,first,second=None):
1459 if second is None: 1460 raise RuntimeError("Cannot compare %s > None" % first) 1461 return '(%s > %s)' % (self.expand(first), 1462 self.expand(second,first.type))
1463
1464 - def GE(self,first,second=None):
1465 if second is None: 1466 raise RuntimeError("Cannot compare %s >= None" % first) 1467 return '(%s >= %s)' % (self.expand(first), 1468 self.expand(second,first.type))
1469
1470 - def is_numerical_type(self, ftype):
1471 return ftype in ('integer','boolean','double','bigint') or \ 1472 ftype.startswith('decimal')
1473
1474 - def REPLACE(self, first, (second, third)):
1475 return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'), 1476 self.expand(second,'string'), 1477 self.expand(third,'string'))
1478
1479 - def CONCAT(self, *items):
1480 return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1481
1482 - def ADD(self, first, second):
1483 if self.is_numerical_type(first.type) or isinstance(first.type, Field): 1484 return '(%s + %s)' % (self.expand(first), 1485 self.expand(second, first.type)) 1486 else: 1487 return self.CONCAT(first, second)
1488
1489 - def SUB(self, first, second):
1490 return '(%s - %s)' % (self.expand(first), 1491 self.expand(second, first.type))
1492
1493 - def MUL(self, first, second):
1494 return '(%s * %s)' % (self.expand(first), 1495 self.expand(second, first.type))
1496
1497 - def DIV(self, first, second):
1498 return '(%s / %s)' % (self.expand(first), 1499 self.expand(second, first.type))
1500
1501 - def MOD(self, first, second):
1502 return '(%s %% %s)' % (self.expand(first), 1503 self.expand(second, first.type))
1504
1505 - def AS(self, first, second):
1506 return '%s AS %s' % (self.expand(first), second)
1507
1508 - def ON(self, first, second):
1509 table_rname = self.table_alias(first) 1510 if use_common_filters(second): 1511 second = self.common_filter(second,[first._tablename]) 1512 return ('%s ON %s') % (self.expand(table_rname), self.expand(second))
1513
1514 - def INVERT(self, first):
1515 return '%s DESC' % self.expand(first)
1516
1517 - def COMMA(self, first, second):
1518 return '%s, %s' % (self.expand(first), self.expand(second))
1519
1520 - def CAST(self, first, second):
1521 return 'CAST(%s AS %s)' % (first, second)
1522
1523 - def expand(self, expression, field_type=None, colnames=False):
1524 if isinstance(expression, Field): 1525 et = expression.table 1526 if not colnames: 1527 table_rname = et._ot and self.QUOTE_TEMPLATE % et._tablename or et._rname or self.QUOTE_TEMPLATE % et._tablename 1528 out = '%s.%s' % (table_rname, expression._rname or (self.QUOTE_TEMPLATE % (expression.name))) 1529 else: 1530 out = '%s.%s' % (self.QUOTE_TEMPLATE % et._tablename, self.QUOTE_TEMPLATE % expression.name) 1531 if field_type == 'string' and not expression.type in ( 1532 'string','text','json','password'): 1533 out = self.CAST(out, self.types['text']) 1534 return out 1535 elif isinstance(expression, (Expression, Query)): 1536 first = expression.first 1537 second = expression.second 1538 op = expression.op 1539 optional_args = expression.optional_args or {} 1540 if not second is None: 1541 out = op(first, second, **optional_args) 1542 elif not first is None: 1543 out = op(first,**optional_args) 1544 elif isinstance(op, str): 1545 if op.endswith(';'): 1546 op=op[:-1] 1547 out = '(%s)' % op 1548 else: 1549 out = op() 1550 return out 1551 elif field_type: 1552 return str(self.represent(expression,field_type)) 1553 elif isinstance(expression,(list,tuple)): 1554 return ','.join(self.represent(item,field_type) \ 1555 for item in expression) 1556 elif isinstance(expression, bool): 1557 return '1' if expression else '0' 1558 else: 1559 return str(expression)
1560
1561 - def table_alias(self, tbl):
1562 if not isinstance(tbl, Table): 1563 tbl = self.db[tbl] 1564 return tbl.sqlsafe_alias
1565 1566
1567 - def alias(self, table, alias):
1568 """ 1569 Given a table object, makes a new table object 1570 with alias name. 1571 """ 1572 other = copy.copy(table) 1573 other['_ot'] = other._ot or other.sqlsafe 1574 other['ALL'] = SQLALL(other) 1575 other['_tablename'] = alias 1576 for fieldname in other.fields: 1577 other[fieldname] = copy.copy(other[fieldname]) 1578 other[fieldname]._tablename = alias 1579 other[fieldname].tablename = alias 1580 other[fieldname].table = other 1581 table._db[alias] = other 1582 return other
1583
1584 - def _truncate(self, table, mode=''):
1585 return ['TRUNCATE TABLE %s %s;' % (table.sqlsafe, mode or '')]
1586
1587 - def truncate(self, table, mode= ' '):
1588 # Prepare functions "write_to_logfile" and "close_logfile" 1589 try: 1590 queries = table._db._adapter._truncate(table, mode) 1591 for query in queries: 1592 self.log(query + '\n', table) 1593 self.execute(query) 1594 self.log('success!\n', table) 1595 finally: 1596 pass
1597
1598 - def _update(self, tablename, query, fields):
1599 if query: 1600 if use_common_filters(query): 1601 query = self.common_filter(query, [tablename]) 1602 sql_w = ' WHERE ' + self.expand(query) 1603 else: 1604 sql_w = '' 1605 sql_v = ','.join(['%s=%s' % (field.sqlsafe_name, 1606 self.expand(value, field.type)) \ 1607 for (field, value) in fields]) 1608 tablename = self.db[tablename].sqlsafe 1609 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1610
1611 - def update(self, tablename, query, fields):
1612 sql = self._update(tablename, query, fields) 1613 try: 1614 self.execute(sql) 1615 except Exception: 1616 e = sys.exc_info()[1] 1617 table = self.db[tablename] 1618 if hasattr(table,'_on_update_error'): 1619 return table._on_update_error(table,query,fields,e) 1620 raise e 1621 try: 1622 return self.cursor.rowcount 1623 except: 1624 return None
1625
1626 - def _delete(self, tablename, query):
1627 if query: 1628 if use_common_filters(query): 1629 query = self.common_filter(query, [tablename]) 1630 sql_w = ' WHERE ' + self.expand(query) 1631 else: 1632 sql_w = '' 1633 tablename = self.db[tablename].sqlsafe 1634 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1635
1636 - def delete(self, tablename, query):
1637 sql = self._delete(tablename, query) 1638 ### special code to handle CASCADE in SQLite & SpatiaLite 1639 db = self.db 1640 table = db[tablename] 1641 if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by: 1642 deleted = [x[table._id.name] for x in db(query).select(table._id)] 1643 ### end special code to handle CASCADE in SQLite & SpatiaLite 1644 self.execute(sql) 1645 try: 1646 counter = self.cursor.rowcount 1647 except: 1648 counter = None 1649 ### special code to handle CASCADE in SQLite & SpatiaLite 1650 if self.dbengine in ('sqlite', 'spatialite') and counter: 1651 for field in table._referenced_by: 1652 if field.type=='reference '+table._tablename \ 1653 and field.ondelete=='CASCADE': 1654 db(field.belongs(deleted)).delete() 1655 ### end special code to handle CASCADE in SQLite & SpatiaLite 1656 return counter
1657
1658 - def get_table(self, query):
1659 tablenames = self.tables(query) 1660 if len(tablenames)==1: 1661 return tablenames[0] 1662 elif len(tablenames)<1: 1663 raise RuntimeError("No table selected") 1664 else: 1665 raise RuntimeError("Too many tables selected")
1666
1667 - def expand_all(self, fields, tablenames):
1668 db = self.db 1669 new_fields = [] 1670 append = new_fields.append 1671 for item in fields: 1672 if isinstance(item,SQLALL): 1673 new_fields += item._table 1674 elif isinstance(item,str): 1675 m = self.REGEX_TABLE_DOT_FIELD.match(item) 1676 if m: 1677 tablename,fieldname = m.groups() 1678 append(db[tablename][fieldname]) 1679 else: 1680 append(Expression(db,lambda item=item:item)) 1681 else: 1682 append(item) 1683 # ## if no fields specified take them all from the requested tables 1684 if not new_fields: 1685 for table in tablenames: 1686 for field in db[table]: 1687 append(field) 1688 return new_fields

    def _select(self, query, fields, attributes):
        tables = self.tables
        for key in set(attributes.keys()) - SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        tablenames_for_common_filters = tablenames
        for field in fields:
            if isinstance(field, basestring):
                m = self.REGEX_TABLE_DOT_FIELD.match(field)
                if m:
                    tn, fn = m.groups()
                    field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        def colexpand(field):
            return self.expand(field, colnames=True)
        self._colnames = map(colexpand, fields)
        def geoexpand(field):
            if isinstance(field.type, str) and field.type.startswith('geometry') and isinstance(field, Field):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        orderby_on_limitby = args_get('orderby_on_limitby', True)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t, Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge = {}  # issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge]  # issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            # join+left patch (solves problem with ordering in left joins)
            tables_to_merge = {}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            tablenames_for_common_filters = [t for t in tablenames
                                             if not t in joinont]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query, tablenames_for_common_filters)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        if inner_join and not left:
            sql_t = ', '.join([self.table_alias(t) for t in
                               iexcluded + itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
        elif not inner_join and left:
            sql_t = ', '.join([self.table_alias(t) for t in
                               excluded + tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames +
                                      iimportant_tablenames +
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        else:
            sql_t = ', '.join(self.table_alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
            sql_o += ' ORDER BY %s' % ', '.join(
                [self.db[t].sqlsafe + '.' + self.db[t][x].sqlsafe_name
                 for t in tablenames for x in (
                     hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey
                     or ['_id']
                 )
                ]
            )
        # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
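
    # Example (a sketch, assuming db.define_table('person', Field('name'));
    # db(query)._select(...) returns the generated SQL without executing it,
    # producing roughly):
    #
    #   >>> print db(db.person.id > 0)._select(db.person.name,
    #   ...     orderby=db.person.name, limitby=(0, 10))
    #   SELECT  "person"."name" FROM "person" WHERE ("person"."id" > 0) ORDER BY "person"."name" LIMIT 10 OFFSET 0;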

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)
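
    # Example (a sketch): limitby=(10, 30) means "rows 10 through 29", so
    # the rendered clause is LIMIT (lmax - lmin) OFFSET lmin:
    #
    #   >>> sql = db(db.person)._select(db.person.ALL, limitby=(10, 30))
    #   >>> 'LIMIT 20 OFFSET 10' in sql
    #   True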

    def _fetchall(self):
        return self.cursor.fetchall()

    def _select_aux(self, sql, fields, attributes):
        args_get = attributes.get
        cache = args_get('cache', None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            if len(key) > 200:
                key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key, _select_aux2, time_expire)
        if isinstance(rows, tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows, limitby[0], None)
        processor = args_get('processor', self.parse)
        cacheable = args_get('cacheable', False)
        return processor(rows, fields, self._colnames, cacheable=cacheable)

    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable', False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key) > 200:
                key = hashlib_md5(key).hexdigest()
            args = (sql, fields, attributes)
            return cache_model(
                key,
                lambda self=self, args=args: self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql, fields, attributes)
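
    # Example (a sketch, assuming a web2py environment where 'cache' is
    # defined): with cacheable=True the whole Rows object is cached under a
    # key derived from the connection URI and the SQL, here for 60 seconds:
    #
    #   >>> rows = db(db.person).select(cache=(cache.ram, 60), cacheable=True)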

    def _count(self, query, distinct=None):
        tablenames = self.tables(query)
        if query:
            if use_common_filters(query):
                query = self.common_filter(query, tablenames)
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_t = ','.join(self.table_alias(t) for t in tablenames)
        if distinct:
            if isinstance(distinct, (list, tuple)):
                distinct = xorify(distinct)
            sql_d = self.expand(distinct)
            return 'SELECT count(DISTINCT %s) FROM %s%s;' % \
                (sql_d, sql_t, sql_w)
        return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)

    def count(self, query, distinct=None):
        self.execute(self._count(query, distinct))
        return self.cursor.fetchone()[0]
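
    # Example (a sketch; db(query).count() routes through these two methods,
    # and db(query)._count() returns just the SQL, roughly):
    #
    #   >>> print db(db.person.id > 0)._count()
    #   SELECT count(*) FROM "person" WHERE ("person"."id" > 0);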

    def tables(self, *queries):
        tables = set()
        for query in queries:
            if isinstance(query, Field):
                tables.add(query.tablename)
            elif isinstance(query, (Expression, Query)):
                if not query.first is None:
                    tables = tables.union(self.tables(query.first))
                if not query.second is None:
                    tables = tables.union(self.tables(query.second))
        return list(tables)
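
    # Example (a sketch): the operand tree of a Query/Expression is walked
    # recursively and every table it touches is collected (order may vary,
    # since a set is used internally):
    #
    #   >>> db._adapter.tables((db.person.name == 'Jim') &
    #   ...                    (db.dog.owner == db.person.id))
    #   ['person', 'dog']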

    def commit(self):
        if self.connection:
            return self.connection.commit()

    def rollback(self):
        if self.connection:
            return self.connection.rollback()

    def close_connection(self):
        if self.connection:
            r = self.connection.close()
            self.connection = None
            return r

    def distributed_transaction_begin(self, key):
        return

    def prepare(self, key):
        if self.connection:
            self.connection.prepare()

    def commit_prepared(self, key):
        if self.connection:
            self.connection.commit()

    def rollback_prepared(self, key):
        if self.connection:
            self.connection.rollback()

    def concat_add(self, tablename):
        return ', ADD '

    def constraint_name(self, table, fieldname):
        return '%s_%s__constraint' % (table, fieldname)

    def create_sequence_and_triggers(self, query, table, **args):
        self.execute(query)

    def log_execute(self, *a, **b):
        if not self.connection:
            raise ValueError(a[0])
        command = a[0]
        if hasattr(self, 'filter_sql_command'):
            command = self.filter_sql_command(command)
        if self.db._debug:
            LOGGER.debug('SQL: %s' % command)
        self.db._lastsql = command
        t0 = time.time()
        ret = self.cursor.execute(command, *a[1:], **b)
        self.db._timings.append((command, time.time() - t0))
        del self.db._timings[:-TIMINGSSIZE]
        return ret
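
    # Example (a sketch): every statement funneled through log_execute is
    # recorded on the DAL instance, so the last SQL string and the recent
    # (sql, seconds) timing pairs can be inspected after any operation:
    #
    #   >>> db(db.person.id > 0).count()
    #   1
    #   >>> db._lastsql
    #   'SELECT count(*) FROM "person" WHERE ("person"."id" > 0);'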

    def execute(self, *a, **b):
        return self.log_execute(*a, **b)

    def represent(self, obj, fieldtype):
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            value = fieldtype.encoder(obj)
            if fieldtype.type in ('string', 'text', 'json'):
                return self.adapt(value)
            return value
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str, obj)
            else:
                obj = map(int, [o for o in obj if o != ''])
        # we don't want to bar_encode json objects
        if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
            return 'NULL'
        r = self.represent_exceptions(obj, fieldtype)
        if not r is None:
            return r
        if fieldtype == 'boolean':
            if obj and not str(obj)[:1].upper() in '0F':
                return self.smart_adapt(self.TRUE)
            else:
                return self.smart_adapt(self.FALSE)
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(long(obj))
        if field_is_type('decimal'):
            return str(obj)
        elif field_is_type('reference'):  # reference
            # check for tablename first
            referenced = fieldtype[9:].strip()
            if referenced in self.db.tables:
                return str(long(obj))
            p = referenced.partition('.')
            if p[2] != '':
                try:
                    ftype = self.db[p[0]][p[2]].type
                    return self.represent(obj, ftype)
                except (ValueError, KeyError):
                    return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(long(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat(self.T_SEP)[:19]
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + self.T_SEP + '00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'json':
            if not self.native_json:
                if have_serializers:
                    obj = serializers.json(obj)
                elif simplejson:
                    obj = simplejson.dumps(obj)
                else:
                    raise RuntimeError("missing simplejson")
        if not isinstance(obj, bytes):
            obj = bytes(obj)
        try:
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return self.adapt(obj)
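
    # Example (a sketch): represent() turns a Python value into a SQL
    # literal according to the declared field type:
    #
    #   >>> db._adapter.represent(None, 'string')
    #   'NULL'
    #   >>> db._adapter.represent(datetime.date(2015, 3, 1), 'date')
    #   "'2015-03-01'"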

    def represent_exceptions(self, obj, fieldtype):
        return None

    def lastrowid(self, table):
        return None

    def rowslice(self, rows, minimum=0, maximum=None):
        """
        By default this function does nothing;
        override when the db does not do slicing.
        """
        return rows

    def parse_value(self, value, field_type, blob_decode=True):
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value, field_type)

    def parse_reference(self, value, field_type):
        referee = field_type[10:].strip()
        if not '.' in referee:
            value = Reference(value)
            value._table, value._record = self.db[referee], None
        return value

    def parse_boolean(self, value, field_type):
        return value == self.TRUE or str(value)[:1].lower() == 't'

    def parse_date(self, value, field_type):
        if isinstance(value, datetime.datetime):
            return value.date()
        if not isinstance(value, (datetime.date, datetime.datetime)):
            (y, m, d) = map(int, str(value)[:10].strip().split('-'))
            value = datetime.date(y, m, d)
        return value

    def parse_time(self, value, field_type):
        if not isinstance(value, datetime.time):
            time_items = map(int, str(value)[:8].strip().split(':')[:3])
            if len(time_items) == 3:
                (h, mi, s) = time_items
            else:
                (h, mi, s) = time_items + [0]
            value = datetime.time(h, mi, s)
        return value

    def parse_datetime(self, value, field_type):
        if not isinstance(value, datetime.datetime):
            value = str(value)
            date_part, time_part, timezone = value[:10], value[11:19], value[19:]
            if '+' in timezone:
                ms, tz = timezone.split('+')
                h, m = tz.split(':')
                dt = datetime.timedelta(seconds=3600 * int(h) + 60 * int(m))
            elif '-' in timezone:
                ms, tz = timezone.split('-')
                h, m = tz.split(':')
                dt = -datetime.timedelta(seconds=3600 * int(h) + 60 * int(m))
            else:
                dt = None
            (y, m, d) = map(int, date_part.split('-'))
            time_parts = time_part and time_part.split(':')[:3] or (0, 0, 0)
            while len(time_parts) < 3:
                time_parts.append(0)
            time_items = map(int, time_parts)
            (h, mi, s) = time_items
            value = datetime.datetime(y, m, d, h, mi, s)
            if dt:
                value = value + dt
        return value
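
    # Example (a sketch): a trailing '+HH:MM' or '-HH:MM' offset is parsed
    # and applied to the otherwise naive datetime:
    #
    #   >>> db._adapter.parse_datetime('2015-03-01 12:00:00+02:00', 'datetime')
    #   datetime.datetime(2015, 3, 1, 14, 0)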

    def parse_blob(self, value, field_type):
        return base64.b64decode(str(value))

    def parse_decimal(self, value, field_type):
        decimals = int(field_type[8:-1].split(',')[-1])
        if self.dbengine in ('sqlite', 'spatialite'):
            value = ('%.' + str(decimals) + 'f') % value
        if not isinstance(value, decimal.Decimal):
            value = decimal.Decimal(str(value))
        return value
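
    # Example (a sketch): the scale is read from the type string, so for a
    # 'decimal(10,2)' field on SQLite (which stores floats) the value is
    # rounded to two digits before being wrapped in a Decimal:
    #
    #   >>> db._adapter.parse_decimal(3.14159, 'decimal(10,2)')
    #   Decimal('3.14')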

    def parse_list_integers(self, value, field_type):
        if not isinstance(self, NoSQLAdapter):
            value = bar_decode_integer(value)
        return value

    def parse_list_references(self, value, field_type):
        if not isinstance(self, NoSQLAdapter):
            value = bar_decode_integer(value)
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        if not isinstance(self, NoSQLAdapter):
            value = bar_decode_string(value)
        return value

    def parse_id(self, value, field_type):
        return long(value)

    def parse_integer(self, value, field_type):
        return long(value)

    def parse_double(self, value, field_type):
        return float(value)

    def parse_json(self, value, field_type):
        if not self.native_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            if have_serializers:
                value = serializers.loads_json(value)
            elif simplejson:
                value = simplejson.loads(value)
            else:
                raise RuntimeError("missing simplejson")
        return value

    def build_parsemap(self):
        self.parsemap = {
            'id': self.parse_id,
            'integer': self.parse_integer,
            'bigint': self.parse_integer,
            'float': self.parse_double,
            'double': self.parse_double,
            'reference': self.parse_reference,
            'boolean': self.parse_boolean,
            'date': self.parse_date,
            'time': self.parse_time,
            'datetime': self.parse_datetime,
            'blob': self.parse_blob,
            'decimal': self.parse_decimal,
            'json': self.parse_json,
            'list:integer': self.parse_list_integers,
            'list:reference': self.parse_list_references,
            'list:string': self.parse_list_strings,
            }

    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable=False):
        db = self.db
        virtualtables = []
        new_rows = []
        tmps = []
        for colname in colnames:
            col_m = self.REGEX_TABLE_DOT_FIELD.match(colname)
            if not col_m:
                tmps.append(None)
            else:
                tablename, fieldname = col_m.groups()
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename, fieldname, table, field, ft))
        for (i, row) in enumerate(rows):
            new_row = Row()
            for (j, colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename, fieldname, table, field, ft) = tmp
                    colset = new_row.get(tablename, None)
                    if colset is None:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value, ft, blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft == 'id' and fieldname != 'id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key.id() if self.use_ndb else value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        colset.update_record = RecordUpdater(colset, table, id)
                        colset.delete_record = RecordDeleter(table, id)
                        if table._db._lazy_tables:
                            colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                    table=rfield.tablename, field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield, id)
                else:
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type, blob_decode)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row, column_name[0], value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        for tablename in virtualtables:
            table = db[tablename]
            fields_virtual = [(f, v) for (f, v) in table.iteritems()
                              if isinstance(v, FieldVirtual)]
            fields_lazy = [(f, v) for (f, v) in table.iteritems()
                           if isinstance(v, FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f, v in fields_virtual:
                        try:
                            box[f] = v.f(row)
                        except AttributeError:
                            pass  # not enough fields to define virtual field
                    for f, v in fields_lazy:
                        try:
                            box[f] = (v.handler or VirtualCommand)(v.f, row)
                        except AttributeError:
                            pass  # not enough fields to define virtual field

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename: item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj

    def common_filter(self, query, tablenames):
        tenant_fieldname = self.db._request_tenant

        for tablename in tablenames:
            table = self.db[tablename]

            # deal with user provided filters
            if table._common_filter != None:
                query = query & table._common_filter(query)

            # deal with multi_tenant filters
            if tenant_fieldname in table:
                default = table[tenant_fieldname].default
                if not default is None:
                    newquery = table[tenant_fieldname] == default
                    if query is None:
                        query = newquery
                    else:
                        query = query & newquery
        return query
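
    # Example (a sketch): a common_filter declared on a table is ANDed into
    # every query automatically; it can be bypassed per-Set with
    # ignore_common_filters=True:
    #
    #   >>> db.define_table('post', Field('body'),
    #   ...     Field('is_public', 'boolean'),
    #   ...     common_filter=lambda query: db.post.is_public == True)
    #   >>> n_public = db(db.post).count()
    #   >>> n_all = db(db.post, ignore_common_filters=True).count()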

    def CASE(self, query, t, f):
        def represent(x):
            types = {type(True): 'boolean', type(0): 'integer', type(1.0): 'double'}
            if x is None:
                return 'NULL'
            elif isinstance(x, Expression):
                return str(x)
            else:
                return self.represent(x, types.get(type(x), 'string'))
        return Expression(self.db, 'CASE WHEN %s THEN %s ELSE %s END' %
                          (self.expand(query), represent(t), represent(f)))
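
    # Example (a sketch, assuming Query.case(t, f) delegates to the CASE
    # method above):
    #
    #   >>> is_jim = (db.person.name == 'Jim')
    #   >>> rows = db(db.person).select(is_jim.case('yes', 'no'))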

    def sqlsafe_table(self, tablename, ot=None):
        if ot is not None:
            return ('%s AS ' + self.QUOTE_TEMPLATE) % (ot, tablename)
        return self.QUOTE_TEMPLATE % tablename

    def sqlsafe_field(self, fieldname):
        return self.QUOTE_TEMPLATE % fieldname

###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################


class SQLiteAdapter(BaseAdapter):
    drivers = ('sqlite2', 'sqlite3')

    # no native SELECT ... FOR UPDATE; emulated with BEGIN IMMEDIATE
    # TRANSACTION (see select below)
    can_select_for_update = None

    def EXTRACT(self, field, what):
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            return None
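
    # Example (a sketch): SQLite has no native EXTRACT, so web2py_extract is
    # registered on the connection (see after_connection below) and simply
    # slices the stored ISO string:
    #
    #   >>> SQLiteAdapter.web2py_extract('year', '2015-03-01 12:30:45')
    #   2015
    #   >>> SQLiteAdapter.web2py_extract('minute', '2015-03-01 12:30:45')
    #   30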

    @staticmethod
    def web2py_regexp(expression, item):
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.adapter_args = adapter_args
        if do_connect:
            self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://', 1)[1]
            if self.dbpath[0] != '/':
                if PYTHON_VERSION[0] == 2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect:
            self.reconnect()

    def after_connection(self):
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

        if self.adapter_args.get('foreign_keys', True):
            self.execute('PRAGMA foreign_keys=ON;')

    def _truncate(self, table, mode=''):
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self, first, second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second, 'string'))
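
    # Example (a sketch): REGEXP works on SQLite only because web2py_regexp
    # was registered as the REGEXP function in after_connection above:
    #
    #   >>> rows = db(db.person.name.regexp('^J')).select()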

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(