Returns an EXISTS clause for the dataset as an SQL::PlaceholderLiteralString.
DB.select(1).where(DB[:items].exists) # SELECT 1 WHERE (EXISTS (SELECT * FROM items))
# File lib/sequel/dataset/sql.rb, line 12
def exists
  SQL::PlaceholderLiteralString.new(EXISTS, [self], true)
end
Returns an INSERT SQL query string. See insert.
DB[:items].insert_sql(:a=>1) # => "INSERT INTO items (a) VALUES (1)"
# File lib/sequel/dataset/sql.rb, line 20
def insert_sql(*values)
  return static_sql(@opts[:sql]) if @opts[:sql]

  check_modification_allowed!

  columns = []

  case values.size
  when 0
    return insert_sql({})
  when 1
    case vals = values.at(0)
    when Hash
      values = []
      vals.each do |k,v|
        columns << k
        values << v
      end
    when Dataset, Array, LiteralString
      values = vals
    end
  when 2
    if (v0 = values.at(0)).is_a?(Array) && ((v1 = values.at(1)).is_a?(Array) || v1.is_a?(Dataset) || v1.is_a?(LiteralString))
      columns, values = v0, v1
      raise(Error, "Different number of values and columns given to insert_sql") if values.is_a?(Array) and columns.length != values.length
    end
  end

  if values.is_a?(Array) && values.empty? && !insert_supports_empty_values?
    columns = [columns().last]
    values = [DEFAULT]
  end
  clone(:columns=>columns, :values=>values).send(:_insert_sql)
end
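As the dispatch above suggests, insert_sql also accepts no arguments, a columns array plus a values array or dataset, or a dataset to insert from. A rough sketch of the output on the default dialect (exact SQL varies by adapter):

DB[:items].insert_sql                        # => "INSERT INTO items DEFAULT VALUES"
DB[:items].insert_sql([:a, :b], [1, 2])      # => "INSERT INTO items (a, b) VALUES (1, 2)"
DB[:items].insert_sql(DB[:old_items])        # => "INSERT INTO items SELECT * FROM old_items"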
Append a literal representation of a value to the given SQL string.
If an unsupported object is given, an Error is raised.
# File lib/sequel/dataset/sql.rb, line 58
def literal_append(sql, v)
  case v
  when Symbol
    if skip_symbol_cache?
      literal_symbol_append(sql, v)
    else
      unless l = db.literal_symbol(v)
        l = ''
        literal_symbol_append(l, v)
        db.literal_symbol_set(v, l)
      end
      sql << l
    end
  when String
    case v
    when LiteralString
      sql << v
    when SQL::Blob
      literal_blob_append(sql, v)
    else
      literal_string_append(sql, v)
    end
  when Integer
    sql << literal_integer(v)
  when Hash
    literal_hash_append(sql, v)
  when SQL::Expression
    literal_expression_append(sql, v)
  when Float
    sql << literal_float(v)
  when BigDecimal
    sql << literal_big_decimal(v)
  when NilClass
    sql << literal_nil
  when TrueClass
    sql << literal_true
  when FalseClass
    sql << literal_false
  when Array
    literal_array_append(sql, v)
  when Time
    v.is_a?(SQLTime) ? literal_sqltime_append(sql, v) : literal_time_append(sql, v)
  when DateTime
    literal_datetime_append(sql, v)
  when Date
    sql << literal_date(v)
  when Dataset
    literal_dataset_append(sql, v)
  else
    literal_other_append(sql, v)
  end
end
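The public literal method wraps this dispatch; a short sketch of typical results on the default dialect (quoting and formats differ per adapter):

ds = DB[:items]
ds.literal(1)           # => "1"
ds.literal('a')         # => "'a'"
ds.literal(nil)         # => "NULL"
ds.literal([1, 2])      # => "(1, 2)"
ds.literal(:items__id)  # => "items.id"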
Returns an array of insert statements for inserting multiple records. This method is used by multi_insert to format insert statements, and expects a keys array and an array of value arrays.
This method should be overridden by descendants if they support inserting multiple records in a single SQL statement.
# File lib/sequel/dataset/sql.rb, line 117
def multi_insert_sql(columns, values)
  case multi_insert_sql_strategy
  when :values
    sql = LiteralString.new('VALUES ')
    expression_list_append(sql, values.map{|r| Array(r)})
    [insert_sql(columns, sql)]
  when :union
    c = false
    sql = LiteralString.new('')
    u = UNION_ALL_SELECT
    f = empty_from_sql
    values.each do |v|
      if c
        sql << u
      else
        sql << SELECT << SPACE
        c = true
      end
      expression_list_append(sql, v)
      sql << f if f
    end
    [insert_sql(columns, sql)]
  else
    values.map{|r| insert_sql(columns, r)}
  end
end
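For illustration, with the default strategy each row becomes its own statement, while databases using the :values strategy build a single multi-row statement (a sketch; exact output depends on the adapter):

DB[:items].multi_insert_sql([:a, :b], [[1, 2], [3, 4]])
# Default strategy: one statement per row
# => ["INSERT INTO items (a, b) VALUES (1, 2)",
#     "INSERT INTO items (a, b) VALUES (3, 4)"]
# With the :values strategy, roughly:
# => ["INSERT INTO items (a, b) VALUES (1, 2), (3, 4)"]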
Same as select_sql, not aliased directly to make subclassing simpler.
# File lib/sequel/dataset/sql.rb, line 145
def sql
  select_sql
end
Returns a TRUNCATE SQL query string. See truncate.
DB[:items].truncate_sql # => 'TRUNCATE items'
# File lib/sequel/dataset/sql.rb, line 152
def truncate_sql
  if opts[:sql]
    static_sql(opts[:sql])
  else
    check_truncation_allowed!
    raise(InvalidOperation, "Can't truncate filtered datasets") if opts[:where] || opts[:having]
    t = ''
    source_list_append(t, opts[:from])
    _truncate_sql(t)
  end
end
Formats an UPDATE statement using the given values. See update.
DB[:items].update_sql(:price => 100, :category => 'software') # => "UPDATE items SET price = 100, category = 'software'"
Raises an Error if the dataset is grouped or includes more than one table.
# File lib/sequel/dataset/sql.rb, line 171
def update_sql(values = OPTS)
  return static_sql(opts[:sql]) if opts[:sql]
  check_modification_allowed!
  clone(:values=>values).send(:_update_sql)
end
These methods, while public, are not designed to be used directly by the end user.
EMULATED_FUNCTION_MAP | = | {} | Map of emulated function names to native function names. | |
WILDCARD | = | LiteralString.new('*').freeze | ||
ALL | = | ' ALL'.freeze | ||
AND_SEPARATOR | = | " AND ".freeze | ||
APOS | = | "'".freeze | ||
APOS_RE | = | /'/.freeze | ||
ARRAY_EMPTY | = | '(NULL)'.freeze | ||
AS | = | ' AS '.freeze | ||
ASC | = | ' ASC'.freeze | ||
BACKSLASH | = | "\\".freeze | ||
BITCOMP_CLOSE | = | ") - 1)".freeze | ||
BITCOMP_OPEN | = | "((0 - ".freeze | ||
BITWISE_METHOD_MAP | = | {:& =>:BITAND, :| => :BITOR, :^ => :BITXOR} | ||
BOOL_FALSE | = | "'f'".freeze | ||
BOOL_TRUE | = | "'t'".freeze | ||
BRACKET_CLOSE | = | ']'.freeze | ||
BRACKET_OPEN | = | '['.freeze | ||
CASE_ELSE | = | " ELSE ".freeze | ||
CASE_END | = | " END)".freeze | ||
CASE_OPEN | = | '(CASE'.freeze | ||
CASE_THEN | = | " THEN ".freeze | ||
CASE_WHEN | = | " WHEN ".freeze | ||
CAST_OPEN | = | 'CAST('.freeze | ||
COLON | = | ':'.freeze | ||
COLUMN_REF_RE1 | = | Sequel::COLUMN_REF_RE1 | ||
COLUMN_REF_RE2 | = | Sequel::COLUMN_REF_RE2 | ||
COLUMN_REF_RE3 | = | Sequel::COLUMN_REF_RE3 | ||
COMMA | = | ', '.freeze | ||
COMMA_SEPARATOR | = | COMMA | ||
CONDITION_FALSE | = | '(1 = 0)'.freeze | ||
CONDITION_TRUE | = | '(1 = 1)'.freeze | ||
COUNT_FROM_SELF_OPTS | = | [:distinct, :group, :sql, :limit, :offset, :compounds] | ||
COUNT_OF_ALL_AS_COUNT | = | SQL::Function.new(:count, WILDCARD).as(:count) | ||
DATASET_ALIAS_BASE_NAME | = | 't'.freeze | ||
DEFAULT | = | LiteralString.new('DEFAULT').freeze | ||
DEFAULT_VALUES | = | " DEFAULT VALUES".freeze | ||
DELETE | = | 'DELETE'.freeze | ||
DESC | = | ' DESC'.freeze | ||
DISTINCT | = | " DISTINCT".freeze | ||
DOT | = | '.'.freeze | ||
DOUBLE_APOS | = | "''".freeze | ||
DOUBLE_QUOTE | = | '""'.freeze | ||
EQUAL | = | ' = '.freeze | ||
ESCAPE | = | " ESCAPE ".freeze | ||
EXTRACT | = | 'extract('.freeze | ||
EXISTS | = | ['EXISTS '.freeze].freeze | ||
FILTER | = | " FILTER (WHERE ".freeze | ||
FOR_UPDATE | = | ' FOR UPDATE'.freeze | ||
FORMAT_DATE | = | "'%Y-%m-%d'".freeze | ||
FORMAT_DATE_STANDARD | = | "DATE '%Y-%m-%d'".freeze | ||
FORMAT_OFFSET | = | "%+03i%02i".freeze | ||
FORMAT_TIMESTAMP_RE | = | /%[Nz]/.freeze | ||
FORMAT_USEC | = | '%N'.freeze | ||
FRAME_ALL | = | "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING".freeze | ||
FRAME_ROWS | = | "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW".freeze | ||
FROM | = | ' FROM '.freeze | ||
FUNCTION_DISTINCT | = | "DISTINCT ".freeze | ||
GROUP_BY | = | " GROUP BY ".freeze | ||
HAVING | = | " HAVING ".freeze | ||
INSERT | = | "INSERT".freeze | ||
INTO | = | " INTO ".freeze | ||
IS_LITERALS | = | {nil=>'NULL'.freeze, true=>'TRUE'.freeze, false=>'FALSE'.freeze}.freeze | ||
IS_OPERATORS | = | ::Sequel::SQL::ComplexExpression::IS_OPERATORS | ||
LATERAL | = | 'LATERAL '.freeze | ||
LIKE_OPERATORS | = | ::Sequel::SQL::ComplexExpression::LIKE_OPERATORS | ||
LIMIT | = | " LIMIT ".freeze | ||
N_ARITY_OPERATORS | = | ::Sequel::SQL::ComplexExpression::N_ARITY_OPERATORS | ||
NOT_SPACE | = | 'NOT '.freeze | ||
NULL | = | "NULL".freeze | ||
NULLS_FIRST | = | " NULLS FIRST".freeze | ||
NULLS_LAST | = | " NULLS LAST".freeze | ||
OFFSET | = | " OFFSET ".freeze | ||
ON | = | ' ON '.freeze | ||
ON_PAREN | = | " ON (".freeze | ||
ORDER_BY | = | " ORDER BY ".freeze | ||
ORDER_BY_NS | = | "ORDER BY ".freeze | ||
OVER | = | ' OVER '.freeze | ||
PAREN_CLOSE | = | ')'.freeze | ||
PAREN_OPEN | = | '('.freeze | ||
PAREN_SPACE_OPEN | = | ' ('.freeze | ||
PARTITION_BY | = | "PARTITION BY ".freeze | ||
QUALIFY_KEYS | = | [:select, :where, :having, :order, :group] | ||
QUESTION_MARK | = | '?'.freeze | ||
QUESTION_MARK_RE | = | /\?/.freeze | ||
QUOTE | = | '"'.freeze | ||
QUOTE_RE | = | /"/.freeze | ||
RETURNING | = | " RETURNING ".freeze | ||
SELECT | = | 'SELECT'.freeze | ||
SET | = | ' SET '.freeze | ||
SPACE | = | ' '.freeze | ||
SQL_WITH | = | "WITH ".freeze | ||
SPACE_WITH | = | " WITH ".freeze | ||
TILDE | = | '~'.freeze | ||
TIMESTAMP_FORMAT | = | "'%Y-%m-%d %H:%M:%S%N%z'".freeze | ||
STANDARD_TIMESTAMP_FORMAT | = | "TIMESTAMP #{TIMESTAMP_FORMAT}".freeze | ||
TWO_ARITY_OPERATORS | = | ::Sequel::SQL::ComplexExpression::TWO_ARITY_OPERATORS | ||
REGEXP_OPERATORS | = | ::Sequel::SQL::ComplexExpression::REGEXP_OPERATORS | ||
UNDERSCORE | = | '_'.freeze | ||
UPDATE | = | 'UPDATE'.freeze | ||
USING | = | ' USING ('.freeze | ||
UNION_ALL_SELECT | = | ' UNION ALL SELECT '.freeze | ||
VALUES | = | " VALUES ".freeze | ||
WHERE | = | " WHERE ".freeze | ||
WITH_ORDINALITY | = | " WITH ORDINALITY".freeze | ||
WITHIN_GROUP | = | " WITHIN GROUP (ORDER BY ".freeze | ||
DATETIME_SECFRACTION_ARG | = | RUBY_VERSION >= '1.9.0' ? 1000000 : 86400000000 |
Define a dataset literalization method for the given type in the given module, using the given clauses.
Arguments:
mod : | Module in which to define method |
type : | Type of SQL literalization method to create, either :select, :insert, :update, or :delete |
clauses : | array of clauses that make up the SQL query for the type. This can either be a single array of symbols/strings, or it can be an array of pairs, with the first element in each pair being an if/elsif/else code fragment, and the second element in each pair being an array of symbol/strings for the appropriate branch. |
# File lib/sequel/dataset/sql.rb, line 198
def self.def_sql_method(mod, type, clauses)
  priv = type == :update || type == :insert

  lines = []
  lines << 'private' if priv
  lines << "def #{'_' if priv}#{type}_sql"
  lines << 'if sql = opts[:sql]; return static_sql(sql) end' unless priv
  lines << 'check_modification_allowed!' if type == :delete
  lines << 'sql = @opts[:append_sql] || sql_string_origin'

  if clauses.all?{|c| c.is_a?(Array)}
    clauses.each do |i, cs|
      lines << i
      lines.concat(clause_methods(type, cs).map{|x| "#{x}(sql)"})
    end
    lines << 'end'
  else
    lines.concat(clause_methods(type, clauses).map{|x| "#{x}(sql)"})
  end

  lines << 'sql'
  lines << 'end'

  mod.class_eval lines.join("\n"), __FILE__, __LINE__
end
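As a hypothetical sketch of how adapter code might use this: each clause symbol maps to a private #{type}_#{clause}_sql(sql) method that appends its fragment to the SQL string.

# Hypothetical adapter dataset class; the clause methods delete_delete_sql,
# delete_from_sql, and delete_where_sql are assumed to be defined elsewhere.
Sequel::Dataset.def_sql_method(MyAdapter::Dataset, :delete, %w'delete from where')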
Append literalization of boolean constant to SQL string.
# File lib/sequel/dataset/sql.rb, line 368
def boolean_constant_sql_append(sql, constant)
  if (constant == true || constant == false) && !supports_where_true?
    sql << (constant == true ? CONDITION_TRUE : CONDITION_FALSE)
  else
    literal_append(sql, constant)
  end
end
Append literalization of case expression to SQL string.
# File lib/sequel/dataset/sql.rb, line 377
def case_expression_sql_append(sql, ce)
  sql << CASE_OPEN
  if ce.expression?
    sql << SPACE
    literal_append(sql, ce.expression)
  end
  w = CASE_WHEN
  t = CASE_THEN
  ce.conditions.each do |c,r|
    sql << w
    literal_append(sql, c)
    sql << t
    literal_append(sql, r)
  end
  sql << CASE_ELSE
  literal_append(sql, ce.default)
  sql << CASE_END
end
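For example, a case expression built with Sequel.case renders roughly as follows on the default dialect (a sketch):

DB[:items].literal(Sequel.case({{:x=>1}=>'a'}, 'b'))
# => "(CASE WHEN (x = 1) THEN 'a' ELSE 'b' END)"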
Append literalization of complex expression to SQL string.
# File lib/sequel/dataset/sql.rb, line 410
def complex_expression_sql_append(sql, op, args)
  case op
  when *IS_OPERATORS
    r = args.at(1)
    if r.nil? || supports_is_true?
      raise(InvalidOperation, 'Invalid argument used for IS operator') unless val = IS_LITERALS[r]
      sql << PAREN_OPEN
      literal_append(sql, args.at(0))
      sql << SPACE << op.to_s << SPACE
      sql << val << PAREN_CLOSE
    elsif op == :IS
      complex_expression_sql_append(sql, :"=", args)
    else
      complex_expression_sql_append(sql, :OR, [SQL::BooleanExpression.new(:"!=", *args), SQL::BooleanExpression.new(:IS, args.at(0), nil)])
    end
  when :IN, :"NOT IN"
    cols = args.at(0)
    vals = args.at(1)
    col_array = true if cols.is_a?(Array)
    if vals.is_a?(Array)
      val_array = true
      empty_val_array = vals == []
    end
    if empty_val_array
      literal_append(sql, empty_array_value(op, cols))
    elsif col_array
      if !supports_multiple_column_in?
        if val_array
          expr = SQL::BooleanExpression.new(:OR, *vals.to_a.map{|vs| SQL::BooleanExpression.from_value_pairs(cols.to_a.zip(vs).map{|c, v| [c, v]})})
          literal_append(sql, op == :IN ? expr : ~expr)
        else
          old_vals = vals
          vals = vals.naked if vals.is_a?(Sequel::Dataset)
          vals = vals.to_a
          val_cols = old_vals.columns
          complex_expression_sql_append(sql, op, [cols, vals.map!{|x| x.values_at(*val_cols)}])
        end
      else
        # If the columns and values are both arrays, use array_sql instead of
        # literal so that if values is an array of two element arrays, it
        # will be treated as a value list instead of a condition specifier.
        sql << PAREN_OPEN
        literal_append(sql, cols)
        sql << SPACE << op.to_s << SPACE
        if val_array
          array_sql_append(sql, vals)
        else
          literal_append(sql, vals)
        end
        sql << PAREN_CLOSE
      end
    else
      sql << PAREN_OPEN
      literal_append(sql, cols)
      sql << SPACE << op.to_s << SPACE
      literal_append(sql, vals)
      sql << PAREN_CLOSE
    end
  when :LIKE, :'NOT LIKE'
    sql << PAREN_OPEN
    literal_append(sql, args.at(0))
    sql << SPACE << op.to_s << SPACE
    literal_append(sql, args.at(1))
    sql << ESCAPE
    literal_append(sql, BACKSLASH)
    sql << PAREN_CLOSE
  when :ILIKE, :'NOT ILIKE'
    complex_expression_sql_append(sql, (op == :ILIKE ? :LIKE : :'NOT LIKE'), args.map{|v| Sequel.function(:UPPER, v)})
  when *TWO_ARITY_OPERATORS
    if REGEXP_OPERATORS.include?(op) && !supports_regexp?
      raise InvalidOperation, "Pattern matching via regular expressions is not supported on #{db.database_type}"
    end
    sql << PAREN_OPEN
    literal_append(sql, args.at(0))
    sql << SPACE << op.to_s << SPACE
    literal_append(sql, args.at(1))
    sql << PAREN_CLOSE
  when *N_ARITY_OPERATORS
    sql << PAREN_OPEN
    c = false
    op_str = " #{op} "
    args.each do |a|
      sql << op_str if c
      literal_append(sql, a)
      c ||= true
    end
    sql << PAREN_CLOSE
  when :NOT
    sql << NOT_SPACE
    literal_append(sql, args.at(0))
  when :NOOP
    literal_append(sql, args.at(0))
  when :'B~'
    sql << TILDE
    literal_append(sql, args.at(0))
  when :extract
    sql << EXTRACT << args.at(0).to_s << FROM
    literal_append(sql, args.at(1))
    sql << PAREN_CLOSE
  else
    raise(InvalidOperation, "invalid operator #{op}")
  end
end
Append literalization of delayed evaluation to SQL string, causing the delayed evaluation proc to be evaluated.
# File lib/sequel/dataset/sql.rb, line 521
def delayed_evaluation_sql_append(sql, delay)
  if recorder = @opts[:placeholder_literalizer]
    recorder.use(sql, lambda{delay.call(self)}, nil)
  else
    literal_append(sql, delay.call(self))
  end
end
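For example, Sequel.delay creates such a delayed evaluation, so the block runs each time the SQL is generated rather than when the dataset is built (a sketch):

ds = DB[:items].where(:updated_at => Sequel.delay{Time.now - 3600})
ds.sql  # the Time.now call happens here, at literalization time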
Append literalization of function call to SQL string.
# File lib/sequel/dataset/sql.rb, line 530 530: def function_sql_append(sql, f) 531: name = f.name 532: opts = f.opts 533: 534: if opts[:emulate] 535: if emulate_function?(name) 536: emulate_function_sql_append(sql, f) 537: return 538: end 539: 540: name = native_function_name(name) 541: end 542: 543: sql << LATERAL if opts[:lateral] 544: 545: case name 546: when SQL::Identifier 547: if supports_quoted_function_names? && opts[:quoted] != false 548: literal_append(sql, name) 549: else 550: sql << name.value.to_s 551: end 552: when SQL::QualifiedIdentifier 553: if supports_quoted_function_names? && opts[:quoted] != false 554: literal_append(sql, name) 555: else 556: sql << split_qualifiers(name).join(DOT) 557: end 558: else 559: if supports_quoted_function_names? && opts[:quoted] 560: quote_identifier_append(sql, name) 561: else 562: sql << name.to_s 563: end 564: end 565: 566: sql << PAREN_OPEN 567: if opts[:*] 568: sql << WILDCARD 569: else 570: sql << FUNCTION_DISTINCT if opts[:distinct] 571: expression_list_append(sql, f.args) 572: end 573: sql << PAREN_CLOSE 574: 575: if group = opts[:within_group] 576: sql << WITHIN_GROUP 577: expression_list_append(sql, group) 578: sql << PAREN_CLOSE 579: end 580: 581: if filter = opts[:filter] 582: sql << FILTER 583: literal_append(sql, filter_expr(filter, &opts[:filter_block])) 584: sql << PAREN_CLOSE 585: end 586: 587: if window = opts[:over] 588: sql << OVER 589: window_sql_append(sql, window.opts) 590: end 591: 592: if opts[:with_ordinality] 593: sql << WITH_ORDINALITY 594: end 595: end
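For illustration, SQL::Function objects created with Sequel.function pass through this method; a sketch of typical output on the default dialect:

ds = DB[:items]
ds.literal(Sequel.function(:count, :id))           # => "count(id)"
ds.literal(Sequel.function(:count, :id).distinct)  # => "count(DISTINCT id)"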
Append literalization of JOIN clause without ON or USING to SQL string.
# File lib/sequel/dataset/sql.rb, line 598
def join_clause_sql_append(sql, jc)
  table = jc.table
  table_alias = jc.table_alias
  table_alias = nil if table == table_alias && !jc.column_aliases
  sql << SPACE << join_type_sql(jc.join_type) << SPACE
  identifier_append(sql, table)
  as_sql_append(sql, table_alias, jc.column_aliases) if table_alias
end
Append literalization of ordered expression to SQL string.
# File lib/sequel/dataset/sql.rb, line 629
def ordered_expression_sql_append(sql, oe)
  literal_append(sql, oe.expression)
  sql << (oe.descending ? DESC : ASC)
  case oe.nulls
  when :first
    sql << NULLS_FIRST
  when :last
    sql << NULLS_LAST
  end
end
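For example, Sequel.desc and Sequel.asc with a :nulls option exercise both branches (a sketch):

DB[:items].order(Sequel.desc(:name, :nulls=>:last)).sql
# => "SELECT * FROM items ORDER BY name DESC NULLS LAST"
DB[:items].order(Sequel.asc(:name, :nulls=>:first)).sql
# => "SELECT * FROM items ORDER BY name ASC NULLS FIRST"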
Append literalization of placeholder literal string to SQL string.
# File lib/sequel/dataset/sql.rb, line 641 641: def placeholder_literal_string_sql_append(sql, pls) 642: args = pls.args 643: str = pls.str 644: sql << PAREN_OPEN if pls.parens 645: if args.is_a?(Hash) 646: if args.empty? 647: sql << str 648: else 649: re = /:(#{args.keys.map{|k| Regexp.escape(k.to_s)}.join('|')})\b/ 650: loop do 651: previous, q, str = str.partition(re) 652: sql << previous 653: literal_append(sql, args[($1||q[1..-1].to_s).to_sym]) unless q.empty? 654: break if str.empty? 655: end 656: end 657: elsif str.is_a?(Array) 658: len = args.length 659: str.each_with_index do |s, i| 660: sql << s 661: literal_append(sql, args[i]) unless i == len 662: end 663: unless str.length == args.length || str.length == args.length + 1 664: raise Error, "Mismatched number of placeholders (#{str.length}) and placeholder arguments (#{args.length}) when using placeholder array" 665: end 666: else 667: i = -1 668: match_len = args.length - 1 669: loop do 670: previous, q, str = str.partition(QUESTION_MARK) 671: sql << previous 672: literal_append(sql, args.at(i+=1)) unless q.empty? 673: if str.empty? 674: unless i == match_len 675: raise Error, "Mismatched number of placeholders (#{i+1}) and placeholder arguments (#{args.length}) when using placeholder array" 676: end 677: break 678: end 679: end 680: end 681: sql << PAREN_CLOSE if pls.parens 682: end
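Placeholder literal strings are usually created via Sequel.lit with arguments; a sketch of the question-mark and named-placeholder styles handled above:

ds = DB[:items]
ds.literal(Sequel.lit('price > ?', 100))       # => "price > 100"
ds.literal(Sequel.lit('price > :p', :p=>100))  # => "price > 100"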
Append literalization of qualified identifier to SQL string. If 3 arguments are given, the 2nd should be the table/qualifier and the third should be column/qualified. If 2 arguments are given, the 2nd should be an SQL::QualifiedIdentifier.
# File lib/sequel/dataset/sql.rb, line 687
def qualified_identifier_sql_append(sql, table, column=(c = table.column; table = table.table; c))
  identifier_append(sql, table)
  sql << DOT
  identifier_append(sql, column)
end
Append literalization of unqualified identifier to SQL string. Adds quoting to identifiers (columns and tables). If identifiers are not being quoted, returns name as a string. If identifiers are being quoted quote the name with quoted_identifier.
# File lib/sequel/dataset/sql.rb, line 697
def quote_identifier_append(sql, name)
  if name.is_a?(LiteralString)
    sql << name
  else
    name = name.value if name.is_a?(SQL::Identifier)
    name = input_identifier(name)
    if quote_identifiers?
      quoted_identifier_append(sql, name)
    else
      sql << name
    end
  end
end
Append literalization of identifier or unqualified identifier to SQL string.
# File lib/sequel/dataset/sql.rb, line 712
def quote_schema_table_append(sql, table)
  schema, table = schema_and_table(table)
  if schema
    quote_identifier_append(sql, schema)
    sql << DOT
  end
  quote_identifier_append(sql, table)
end
Append literalization of quoted identifier to SQL string. This method quotes the given name with the SQL standard double quote. It should be overridden by subclasses to provide quoting that does not match the SQL standard, such as backticks (used by MySQL and SQLite).
# File lib/sequel/dataset/sql.rb, line 725
def quoted_identifier_append(sql, name)
  sql << QUOTE << name.to_s.gsub(QUOTE_RE, DOUBLE_QUOTE) << QUOTE
end
Split the schema information from the table, returning two strings, one for the schema and one for the table. The returned schema may be nil, but the table will always have a string value.
Note that this function does not handle tables with more than one level of qualification (e.g. database.schema.table on Microsoft SQL Server).
# File lib/sequel/dataset/sql.rb, line 736
def schema_and_table(table_name, sch=nil)
  sch = sch.to_s if sch
  case table_name
  when Symbol
    s, t, _ = split_symbol(table_name)
    [s||sch, t]
  when SQL::QualifiedIdentifier
    [table_name.table.to_s, table_name.column.to_s]
  when SQL::Identifier
    [sch, table_name.value.to_s]
  when String
    [sch, table_name]
  else
    raise Error, 'table_name should be a Symbol, SQL::QualifiedIdentifier, SQL::Identifier, or String'
  end
end
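For example (a sketch):

ds = DB[:items]
ds.schema_and_table(:s__t)                  # => ['s', 't']
ds.schema_and_table(:t)                     # => [nil, 't']
ds.schema_and_table(Sequel.qualify(:s, :t)) # => ['s', 't']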
Splits table_name into an array of strings.
ds.split_qualifiers(:s) # ['s']
ds.split_qualifiers(:t__s) # ['t', 's']
ds.split_qualifiers(Sequel.qualify(:d, :t__s)) # ['d', 't', 's']
ds.split_qualifiers(Sequel.qualify(:h__d, :t__s)) # ['h', 'd', 't', 's']
# File lib/sequel/dataset/sql.rb, line 759
def split_qualifiers(table_name, *args)
  case table_name
  when SQL::QualifiedIdentifier
    split_qualifiers(table_name.table, nil) + split_qualifiers(table_name.column, nil)
  else
    sch, table = schema_and_table(table_name, *args)
    sch ? [sch, table] : [table]
  end
end
Append literalization of subscripts (SQL array accesses) to SQL string.
# File lib/sequel/dataset/sql.rb, line 770
def subscript_sql_append(sql, s)
  literal_append(sql, s.f)
  sql << BRACKET_OPEN
  if s.sub.length == 1 && (range = s.sub.first).is_a?(Range)
    literal_append(sql, range.begin)
    sql << COLON
    e = range.end
    e -= 1 if range.exclude_end? && e.is_a?(Integer)
    literal_append(sql, e)
  else
    expression_list_append(sql, s.sub)
  end
  sql << BRACKET_CLOSE
end
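Subscripts are normally built with Sequel.subscript; a sketch of the two branches above:

ds = DB[:items]
ds.literal(Sequel.subscript(:a, 1))     # => "a[1]"
ds.literal(Sequel.subscript(:a, 1..2))  # => "a[1:2]"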
Append literalization of windows (for window functions) to SQL string.
# File lib/sequel/dataset/sql.rb, line 786 786: def window_sql_append(sql, opts) 787: raise(Error, 'This dataset does not support window functions') unless supports_window_functions? 788: sql << PAREN_OPEN 789: window, part, order, frame = opts.values_at(:window, :partition, :order, :frame) 790: space = false 791: space_s = SPACE 792: if window 793: literal_append(sql, window) 794: space = true 795: end 796: if part 797: sql << space_s if space 798: sql << PARTITION_BY 799: expression_list_append(sql, Array(part)) 800: space = true 801: end 802: if order 803: sql << space_s if space 804: sql << ORDER_BY_NS 805: expression_list_append(sql, Array(order)) 806: space = true 807: end 808: case frame 809: when nil 810: # nothing 811: when :all 812: sql << space_s if space 813: sql << FRAME_ALL 814: when :rows 815: sql << space_s if space 816: sql << FRAME_ROWS 817: when String 818: sql << space_s if space 819: sql << frame 820: else 821: raise Error, "invalid window frame clause, should be :all, :rows, a string, or nil" 822: end 823: sql << PAREN_CLOSE 824: end
These methods all return modified copies of the receiver.
EXTENSIONS | = | {} | Hash of extension name symbols to callable objects to load the extension into the Dataset object (usually by extending it with a module defined in the extension). | |
COLUMN_CHANGE_OPTS | = | [:select, :sql, :from, :join].freeze | The dataset options that require the removal of cached columns if changed. | |
NON_SQL_OPTIONS | = | [:server, :defaults, :overrides, :graph, :eager, :eager_graph, :graph_aliases] | Which options don't affect the SQL generation. Used by simple_select_all? to determine if this is a simple SELECT * FROM table. | |
CONDITIONED_JOIN_TYPES | = | [:inner, :full_outer, :right_outer, :left_outer, :full, :right, :left] | These symbols have _join methods created (e.g. inner_join) that call join_table with the symbol, passing along the arguments and block from the method call. | |
UNCONDITIONED_JOIN_TYPES | = | [:natural, :natural_left, :natural_right, :natural_full, :cross] | These symbols have _join methods created (e.g. natural_join). They accept a table argument and options hash which is passed to join_table, and they raise an error if called with a block. | |
JOIN_METHODS | = | (CONDITIONED_JOIN_TYPES + UNCONDITIONED_JOIN_TYPES).map{|x| "#{x}_join".to_sym} + [:join, :join_table] | All methods that return modified datasets with a joined table added. | |
QUERY_METHODS | = | (<<-METHS).split.map{|x| x.to_sym} + JOIN_METHODS add_graph_aliases and distinct except exclude exclude_having exclude_where filter for_update from from_self graph grep group group_and_count group_by having intersect invert limit lock_style naked offset or order order_append order_by order_more order_prepend qualify reverse reverse_order select select_all select_append select_group select_more server set_graph_aliases unfiltered ungraphed ungrouped union unlimited unordered where with with_recursive with_sql METHS | Methods that return modified datasets |
Register an extension callback for Dataset objects. ext should be the extension name symbol, and mod should either be a Module that the dataset is extended with, or a callable object called with the database object. If mod is not provided, a block can be provided and is treated as the mod object.
If mod is a module, this also registers a Database extension that will extend all of the database's datasets.
# File lib/sequel/dataset/query.rb, line 53 53: def self.register_extension(ext, mod=nil, &block) 54: if mod 55: raise(Error, "cannot provide both mod and block to Dataset.register_extension") if block 56: if mod.is_a?(Module) 57: block = proc{|ds| ds.extend(mod)} 58: Sequel::Database.register_extension(ext){|db| db.extend_datasets(mod)} 59: else 60: block = mod 61: end 62: end 63: Sequel.synchronize{EXTENSIONS[ext] = block} 64: end
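A hypothetical extension, registered and then loaded into a dataset with Dataset#extension (the module and extension names here are made up for illustration):

# Hypothetical module providing one extra query method.
module ReverseNameOrder
  def reverse_name_order
    order(Sequel.desc(:name))
  end
end
Sequel::Dataset.register_extension(:reverse_name_order, ReverseNameOrder)

DB[:items].extension(:reverse_name_order).reverse_name_order
# SELECT * FROM items ORDER BY name DESC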
Returns a new clone of the dataset with the given options merged. If the options changed include options in COLUMN_CHANGE_OPTS, the cached columns are deleted. This method should generally not be called directly by user code.
# File lib/sequel/dataset/query.rb, line 75
def clone(opts = nil)
  c = super()
  if opts
    c.instance_variable_set(:@opts, @opts.merge(opts))
    c.instance_variable_set(:@columns, nil) if @columns && !opts.each_key{|o| break if COLUMN_CHANGE_OPTS.include?(o)}
  else
    c.instance_variable_set(:@opts, @opts.dup)
  end
  c
end
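For illustration, the query methods below are thin wrappers around clone; the two calls in this sketch produce the same SQL, though the public query methods should be preferred:

DB[:items].order(:name).sql            # => "SELECT * FROM items ORDER BY name"
DB[:items].clone(:order=>[:name]).sql  # => "SELECT * FROM items ORDER BY name"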
Returns a copy of the dataset with the SQL DISTINCT clause. The DISTINCT clause is used to remove duplicate rows from the output. If arguments are provided, uses a DISTINCT ON clause, in which case it will only be distinct on those columns, instead of all returned columns. If a block is given, it is treated as a virtual row block, similar to where. Raises an error if arguments are given and DISTINCT ON is not supported.
DB[:items].distinct # SQL: SELECT DISTINCT * FROM items
DB[:items].order(:id).distinct(:id) # SQL: SELECT DISTINCT ON (id) * FROM items ORDER BY id
DB[:items].order(:id).distinct{func(:id)} # SQL: SELECT DISTINCT ON (func(id)) * FROM items ORDER BY id
# File lib/sequel/dataset/query.rb, line 96
def distinct(*args, &block)
  virtual_row_columns(args, block)
  raise(InvalidOperation, "DISTINCT ON not supported") if !args.empty? && !supports_distinct_on?
  clone(:distinct => args)
end
Adds an EXCEPT clause using a second dataset object. An EXCEPT compound dataset returns all rows in the current dataset that are not in the given dataset. Raises an InvalidOperation if the operation is not supported. Options:
:alias : | Use the given value as the from_self alias |
:all : | Set to true to use EXCEPT ALL instead of EXCEPT, so duplicate rows can occur |
:from_self : | Set to false to not wrap the returned dataset in a from_self, use with care. |
DB[:items].except(DB[:other_items])
# SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS t1

DB[:items].except(DB[:other_items], :all=>true, :from_self=>false)
# SELECT * FROM items EXCEPT ALL SELECT * FROM other_items

DB[:items].except(DB[:other_items], :alias=>:i)
# SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS i
# File lib/sequel/dataset/query.rb, line 119 119: def except(dataset, opts=OPTS) 120: raise(InvalidOperation, "EXCEPT not supported") unless supports_intersect_except? 121: raise(InvalidOperation, "EXCEPT ALL not supported") if opts[:all] && !supports_intersect_except_all? 122: compound_clone(:except, dataset, opts) 123: end
Performs the inverse of Dataset#where. Note that if you have multiple filter conditions, this is not the same as a negation of all conditions.
DB[:items].exclude(:category => 'software')
# SELECT * FROM items WHERE (category != 'software')

DB[:items].exclude(:category => 'software', :id=>3)
# SELECT * FROM items WHERE ((category != 'software') OR (id != 3))
# File lib/sequel/dataset/query.rb, line 133 133: def exclude(*cond, &block) 134: _filter_or_exclude(true, :where, *cond, &block) 135: end
Inverts the given conditions and adds them to the HAVING clause.
DB[:items].select_group(:name).exclude_having{count(name) < 2} # SELECT name FROM items GROUP BY name HAVING (count(name) >= 2)
# File lib/sequel/dataset/query.rb, line 141 141: def exclude_having(*cond, &block) 142: _filter_or_exclude(true, :having, *cond, &block) 143: end
Returns a copy of the dataset with the source changed. If no source is given, removes all tables. If multiple sources are given, it is the same as using a CROSS JOIN (cartesian product) between all tables. If a block is given, it is treated as a virtual row block, similar to where.
DB[:items].from # SQL: SELECT * DB[:items].from(:blah) # SQL: SELECT * FROM blah DB[:items].from(:blah, :foo) # SQL: SELECT * FROM blah, foo DB[:items].from{fun(arg)} # SQL: SELECT * FROM fun(arg)
# File lib/sequel/dataset/query.rb, line 176 176: def from(*source, &block) 177: virtual_row_columns(source, block) 178: table_alias_num = 0 179: ctes = nil 180: source.map! do |s| 181: case s 182: when Dataset 183: if hoist_cte?(s) 184: ctes ||= [] 185: ctes += s.opts[:with] 186: s = s.clone(:with=>nil) 187: end 188: SQL::AliasedExpression.new(s, dataset_alias(table_alias_num+=1)) 189: when Symbol 190: sch, table, aliaz = split_symbol(s) 191: if aliaz 192: s = sch ? SQL::QualifiedIdentifier.new(sch, table) : SQL::Identifier.new(table) 193: SQL::AliasedExpression.new(s, aliaz.to_sym) 194: else 195: s 196: end 197: else 198: s 199: end 200: end 201: o = {:from=>source.empty? ? nil : source} 202: o[:with] = (opts[:with] || []) + ctes if ctes 203: o[:num_dataset_sources] = table_alias_num if table_alias_num > 0 204: clone(o) 205: end
Returns a dataset selecting from the current dataset. Supplying the :alias option controls the alias of the result.
ds = DB[:items].order(:name).select(:id, :name) # SELECT id,name FROM items ORDER BY name ds.from_self # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS t1 ds.from_self(:alias=>:foo) # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS foo ds.from_self(:alias=>:foo, :column_aliases=>[:c1, :c2]) # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS foo(c1, c2)
# File lib/sequel/dataset/query.rb, line 221 221: def from_self(opts=OPTS) 222: fs = {} 223: @opts.keys.each{|k| fs[k] = nil unless NON_SQL_OPTIONS.include?(k)} 224: clone(fs).from(opts[:alias] ? as(opts[:alias], opts[:column_aliases]) : self) 225: end
Match any of the columns to any of the patterns. The terms can be strings (which use LIKE) or regular expressions (which are only supported on MySQL and PostgreSQL). Note that the total number of pattern matches will be Array(columns).length * Array(terms).length, which could cause performance issues.
Options (all are boolean):
:all_columns : | All columns must be matched to any of the given patterns. |
:all_patterns : | All patterns must match at least one of the columns. |
:case_insensitive : | Use a case insensitive pattern match (the default is case sensitive if the database supports it). |
If both :all_columns and :all_patterns are true, all columns must match all patterns.
Examples:
dataset.grep(:a, '%test%')
# SELECT * FROM items WHERE (a LIKE '%test%' ESCAPE '\')

dataset.grep([:a, :b], %w'%test% foo')
# SELECT * FROM items WHERE ((a LIKE '%test%' ESCAPE '\') OR (a LIKE 'foo' ESCAPE '\')
#   OR (b LIKE '%test%' ESCAPE '\') OR (b LIKE 'foo' ESCAPE '\'))

dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true)
# SELECT * FROM a WHERE (((a LIKE '%foo%' ESCAPE '\') OR (b LIKE '%foo%' ESCAPE '\'))
#   AND ((a LIKE '%bar%' ESCAPE '\') OR (b LIKE '%bar%' ESCAPE '\')))

dataset.grep([:a, :b], %w'%foo% %bar%', :all_columns=>true)
# SELECT * FROM a WHERE (((a LIKE '%foo%' ESCAPE '\') OR (a LIKE '%bar%' ESCAPE '\'))
#   AND ((b LIKE '%foo%' ESCAPE '\') OR (b LIKE '%bar%' ESCAPE '\')))

dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true, :all_columns=>true)
# SELECT * FROM a WHERE ((a LIKE '%foo%' ESCAPE '\') AND (b LIKE '%foo%' ESCAPE '\')
#   AND (a LIKE '%bar%' ESCAPE '\') AND (b LIKE '%bar%' ESCAPE '\'))
# File lib/sequel/dataset/query.rb, line 262 262: def grep(columns, patterns, opts=OPTS) 263: if opts[:all_patterns] 264: conds = Array(patterns).map do |pat| 265: SQL::BooleanExpression.new(opts[:all_columns] ? :AND : :OR, *Array(columns).map{|c| SQL::StringExpression.like(c, pat, opts)}) 266: end 267: where(SQL::BooleanExpression.new(opts[:all_patterns] ? :AND : :OR, *conds)) 268: else 269: conds = Array(columns).map do |c| 270: SQL::BooleanExpression.new(:OR, *Array(patterns).map{|pat| SQL::StringExpression.like(c, pat, opts)}) 271: end 272: where(SQL::BooleanExpression.new(opts[:all_columns] ? :AND : :OR, *conds)) 273: end 274: end
Returns a copy of the dataset with the results grouped by the value of the given columns. If a block is given, it is treated as a virtual row block, similar to where.
DB[:items].group(:id) # SELECT * FROM items GROUP BY id DB[:items].group(:id, :name) # SELECT * FROM items GROUP BY id, name DB[:items].group{[a, sum(b)]} # SELECT * FROM items GROUP BY a, sum(b)
# File lib/sequel/dataset/query.rb, line 283 283: def group(*columns, &block) 284: virtual_row_columns(columns, block) 285: clone(:group => (columns.compact.empty? ? nil : columns)) 286: end
Returns a dataset grouped by the given column with count by group. Column aliases may be supplied, and will be included in the select clause. If a block is given, it is treated as a virtual row block, similar to where.
Examples:
DB[:items].group_and_count(:name).all
# SELECT name, count(*) AS count FROM items GROUP BY name
# => [{:name=>'a', :count=>1}, ...]

DB[:items].group_and_count(:first_name, :last_name).all
# SELECT first_name, last_name, count(*) AS count FROM items GROUP BY first_name, last_name
# => [{:first_name=>'a', :last_name=>'b', :count=>1}, ...]

DB[:items].group_and_count(:first_name___name).all
# SELECT first_name AS name, count(*) AS count FROM items GROUP BY first_name
# => [{:name=>'a', :count=>1}, ...]

DB[:items].group_and_count{substr(first_name, 1, 1).as(initial)}.all
# SELECT substr(first_name, 1, 1) AS initial, count(*) AS count FROM items GROUP BY substr(first_name, 1, 1)
# => [{:initial=>'a', :count=>1}, ...]
# File lib/sequel/dataset/query.rb, line 314 314: def group_and_count(*columns, &block) 315: select_group(*columns, &block).select_more(COUNT_OF_ALL_AS_COUNT) 316: end
Adds the appropriate CUBE syntax to GROUP BY.
# File lib/sequel/dataset/query.rb, line 319
def group_cube
  raise Error, "GROUP BY CUBE not supported on #{db.database_type}" unless supports_group_cube?
  clone(:group_options=>:cube)
end
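On a database that supports it, the generated GROUP BY uses CUBE; a sketch (the exact SQL depends on the adapter, and unsupported databases raise as shown above):

DB[:items].group(:type, :region).group_cube.sql
# => "SELECT * FROM items GROUP BY CUBE(type, region)"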
Adds the appropriate ROLLUP syntax to GROUP BY.
# File lib/sequel/dataset/query.rb, line 325
def group_rollup
  raise Error, "GROUP BY ROLLUP not supported on #{db.database_type}" unless supports_group_rollup?
  clone(:group_options=>:rollup)
end
Returns a copy of the dataset with the HAVING conditions changed. See where for argument types.
DB[:items].group(:sum).having(:sum=>10) # SELECT * FROM items GROUP BY sum HAVING (sum = 10)
# File lib/sequel/dataset/query.rb, line 334 334: def having(*cond, &block) 335: _filter(:having, *cond, &block) 336: end
Adds an INTERSECT clause using a second dataset object. An INTERSECT compound dataset returns all rows in both the current dataset and the given dataset. Raises an InvalidOperation if the operation is not supported. Options:
:alias : | Use the given value as the from_self alias |
:all : | Set to true to use INTERSECT ALL instead of INTERSECT, so duplicate rows can occur |
:from_self : | Set to false to not wrap the returned dataset in a from_self, use with care. |
DB[:items].intersect(DB[:other_items]) # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS t1 DB[:items].intersect(DB[:other_items], :all=>true, :from_self=>false) # SELECT * FROM items INTERSECT ALL SELECT * FROM other_items DB[:items].intersect(DB[:other_items], :alias=>:i) # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS i
# File lib/sequel/dataset/query.rb, line 355 355: def intersect(dataset, opts=OPTS) 356: raise(InvalidOperation, "INTERSECT not supported") unless supports_intersect_except? 357: raise(InvalidOperation, "INTERSECT ALL not supported") if opts[:all] && !supports_intersect_except_all? 358: compound_clone(:intersect, dataset, opts) 359: end
Inverts the current WHERE and HAVING clauses. If there is neither a WHERE nor a HAVING clause, adds a WHERE clause that is always false.
DB[:items].where(:category => 'software').invert # SELECT * FROM items WHERE (category != 'software') DB[:items].where(:category => 'software', :id=>3).invert # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))
# File lib/sequel/dataset/query.rb, line 369 369: def invert 370: having, where = @opts.values_at(:having, :where) 371: if having.nil? && where.nil? 372: where(false) 373: else 374: o = {} 375: o[:having] = SQL::BooleanExpression.invert(having) if having 376: o[:where] = SQL::BooleanExpression.invert(where) if where 377: clone(o) 378: end 379: end
Alias of inner_join
# File lib/sequel/dataset/query.rb, line 382 382: def join(*args, &block) 383: inner_join(*args, &block) 384: end
Returns a joined dataset. Not usually called directly, users should use the appropriate join method (e.g. join, left_join, natural_join, cross_join) which fills in the type argument.
Takes the following arguments:
type : | The type of join to do (e.g. :inner) | ||||||||
table : | table to join into the current dataset.
Generally one of the following types:
| ||||||||
expr : | conditions used when joining, depends on type:
| ||||||||
options : | a hash of options, with the following keys supported:
| ||||||||
block : | The block argument should only be given if a JOIN with an ON clause is used, in which case it yields the table alias/name for the table currently being joined, the table alias/name for the last joined (or first table), and an array of previous SQL::JoinClause. Unlike where, this block is not treated as a virtual row block. |
Examples:
DB[:a].join_table(:cross, :b)
# SELECT * FROM a CROSS JOIN b

DB[:a].join_table(:inner, DB[:b], :c=>:d)
# SELECT * FROM a INNER JOIN (SELECT * FROM b) AS t1 ON (t1.c = a.d)

DB[:a].join_table(:left, :b___c, [:d])
# SELECT * FROM a LEFT JOIN b AS c USING (d)

DB[:a].natural_join(:b).join_table(:inner, :c) do |ta, jta, js|
  (Sequel.qualify(ta, :d) > Sequel.qualify(jta, :e)) & {Sequel.qualify(ta, :f)=>DB.from(js.first.table).select(:g)}
end
# SELECT * FROM a NATURAL JOIN b INNER JOIN c
#   ON ((c.d > b.e) AND (c.f IN (SELECT g FROM b)))
# File lib/sequel/dataset/query.rb, line 445 445: def join_table(type, table, expr=nil, options=OPTS, &block) 446: if hoist_cte?(table) 447: s, ds = hoist_cte(table) 448: return s.join_table(type, ds, expr, options, &block) 449: end 450: 451: using_join = expr.is_a?(Array) && !expr.empty? && expr.all?{|x| x.is_a?(Symbol)} 452: if using_join && !supports_join_using? 453: h = {} 454: expr.each{|e| h[e] = e} 455: return join_table(type, table, h, options) 456: end 457: 458: table_alias = options[:table_alias] 459: last_alias = options[:implicit_qualifier] 460: qualify_type = options[:qualify] 461: 462: if table.is_a?(SQL::AliasedExpression) 463: table_expr = if table_alias 464: SQL::AliasedExpression.new(table.expression, table_alias, table.columns) 465: else 466: table 467: end 468: table = table_expr.expression 469: table_name = table_alias = table_expr.alias 470: elsif table.is_a?(Dataset) 471: if table_alias.nil? 472: table_alias_num = (@opts[:num_dataset_sources] || 0) + 1 473: table_alias = dataset_alias(table_alias_num) 474: end 475: table_name = table_alias 476: table_expr = SQL::AliasedExpression.new(table, table_alias) 477: else 478: table, implicit_table_alias = split_alias(table) 479: table_alias ||= implicit_table_alias 480: table_name = table_alias || table 481: table_expr = table_alias ? SQL::AliasedExpression.new(table, table_alias) : table 482: end 483: 484: join = if expr.nil? and !block 485: SQL::JoinClause.new(type, table_expr) 486: elsif using_join 487: raise(Sequel::Error, "can't use a block if providing an array of symbols as expr") if block 488: SQL::JoinUsingClause.new(expr, type, table_expr) 489: else 490: last_alias ||= @opts[:last_joined_table] || first_source_alias 491: if Sequel.condition_specifier?(expr) 492: expr = expr.collect do |k, v| 493: qualify_type = default_join_table_qualification if qualify_type.nil? 494: case qualify_type 495: when false 496: nil # Do no qualification 497: when :deep 498: k = Sequel::Qualifier.new(self, table_name).transform(k) 499: v = Sequel::Qualifier.new(self, last_alias).transform(v) 500: else 501: k = qualified_column_name(k, table_name) if k.is_a?(Symbol) 502: v = qualified_column_name(v, last_alias) if v.is_a?(Symbol) 503: end 504: [k,v] 505: end 506: expr = SQL::BooleanExpression.from_value_pairs(expr) 507: end 508: if block 509: expr2 = yield(table_name, last_alias, @opts[:join] || []) 510: expr = expr ? SQL::BooleanExpression.new(:AND, expr, expr2) : expr2 511: end 512: SQL::JoinOnClause.new(expr, type, table_expr) 513: end 514: 515: opts = {:join => (@opts[:join] || []) + [join]} 516: opts[:last_joined_table] = table_name unless options[:reset_implicit_qualifier] == false 517: opts[:num_dataset_sources] = table_alias_num if table_alias_num 518: clone(opts) 519: end
Marks this dataset as a lateral dataset. If used in another dataset's FROM or JOIN clauses, it will surround the subquery with LATERAL to enable it to deal with previous tables in the query:
DB.from(:a, DB[:b].where(:a__c=>:b__d).lateral) # SELECT * FROM a, LATERAL (SELECT * FROM b WHERE (a.c = b.d))
# File lib/sequel/dataset/query.rb, line 541 541: def lateral 542: clone(:lateral=>true) 543: end
If given an integer, the dataset will contain only the first l results. If given a range, it will contain only those at offsets within that range. If a second argument is given, it is used as an offset. To use an offset without a limit, pass nil as the first argument.
DB[:items].limit(10) # SELECT * FROM items LIMIT 10
DB[:items].limit(10, 20) # SELECT * FROM items LIMIT 10 OFFSET 20
DB[:items].limit(10...20) # SELECT * FROM items LIMIT 10 OFFSET 10
DB[:items].limit(10..20) # SELECT * FROM items LIMIT 11 OFFSET 10
DB[:items].limit(nil, 20) # SELECT * FROM items OFFSET 20
# File lib/sequel/dataset/query.rb, line 555 555: def limit(l, o = (no_offset = true; nil)) 556: return from_self.limit(l, o) if @opts[:sql] 557: 558: if l.is_a?(Range) 559: no_offset = false 560: o = l.first 561: l = l.last - l.first + (l.exclude_end? ? 0 : 1) 562: end 563: l = l.to_i if l.is_a?(String) && !l.is_a?(LiteralString) 564: if l.is_a?(Integer) 565: raise(Error, 'Limits must be greater than or equal to 1') unless l >= 1 566: end 567: 568: ds = clone(:limit=>l) 569: ds = ds.offset(o) unless no_offset 570: ds 571: end
Returns a cloned dataset with the given lock style. If style is a string, it will be used directly. You should never pass a string to this method that is derived from user input, as that can lead to SQL injection.
A symbol may be used for database independent locking behavior, but all supported symbols have separate methods (e.g. for_update).
DB[:items].lock_style('FOR SHARE NOWAIT') # SELECT * FROM items FOR SHARE NOWAIT
# File lib/sequel/dataset/query.rb, line 582 582: def lock_style(style) 583: clone(:lock => style) 584: end
Returns a cloned dataset without a row_proc.
ds = DB[:items] ds.row_proc = proc{|r| r.invert} ds.all # => [{2=>:id}] ds.naked.all # => [{:id=>2}]
# File lib/sequel/dataset/query.rb, line 592 592: def naked 593: ds = clone 594: ds.row_proc = nil 595: ds 596: end
Returns a copy of the dataset with a specified order. Can be safely combined with limit. If you call limit with an offset, it will override the offset if you've called offset first.
DB[:items].offset(10) # SELECT * FROM items OFFSET 10
# File lib/sequel/dataset/query.rb, line 603 603: def offset(o) 604: o = o.to_i if o.is_a?(String) && !o.is_a?(LiteralString) 605: if o.is_a?(Integer) 606: raise(Error, 'Offsets must be greater than or equal to 0') unless o >= 0 607: end 608: clone(:offset => o) 609: end
Adds an alternate filter to an existing filter using OR. If no filter exists an Error is raised.
DB[:items].where(:a).or(:b) # SELECT * FROM items WHERE a OR b
# File lib/sequel/dataset/query.rb, line 615 615: def or(*cond, &block) 616: cond = cond.first if cond.size == 1 617: v = @opts[:where] 618: if v.nil? || (cond.respond_to?(:empty?) && cond.empty? && !block) 619: clone 620: else 621: clone(:where => SQL::BooleanExpression.new(:OR, v, filter_expr(cond, &block))) 622: end 623: end
Returns a copy of the dataset with the order changed. If the dataset has an existing order, it is ignored and overwritten with this order. If a nil is given the returned dataset has no order. This can accept multiple arguments of varying kinds, such as SQL functions. If a block is given, it is treated as a virtual row block, similar to where.
DB[:items].order(:name) # SELECT * FROM items ORDER BY name
DB[:items].order(:a, :b) # SELECT * FROM items ORDER BY a, b
DB[:items].order(Sequel.lit('a + b')) # SELECT * FROM items ORDER BY a + b
DB[:items].order(:a + :b) # SELECT * FROM items ORDER BY (a + b)
DB[:items].order(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name DESC
DB[:items].order(Sequel.asc(:name, :nulls=>:last)) # SELECT * FROM items ORDER BY name ASC NULLS LAST
DB[:items].order{sum(name).desc} # SELECT * FROM items ORDER BY sum(name) DESC
DB[:items].order(nil) # SELECT * FROM items
# File lib/sequel/dataset/query.rb, line 639 639: def order(*columns, &block) 640: virtual_row_columns(columns, block) 641: clone(:order => (columns.compact.empty?) ? nil : columns) 642: end
Alias of order_more, for naming consistency with order_prepend.
# File lib/sequel/dataset/query.rb, line 645 645: def order_append(*columns, &block) 646: order_more(*columns, &block) 647: end
Returns a copy of the dataset with the order columns added to the end of the existing order.
DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b DB[:items].order(:a).order_more(:b) # SELECT * FROM items ORDER BY a, b
# File lib/sequel/dataset/query.rb, line 659 659: def order_more(*columns, &block) 660: columns = @opts[:order] + columns if @opts[:order] 661: order(*columns, &block) 662: end
Returns a copy of the dataset with the order columns added to the beginning of the existing order.
DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b DB[:items].order(:a).order_prepend(:b) # SELECT * FROM items ORDER BY b, a
# File lib/sequel/dataset/query.rb, line 669 669: def order_prepend(*columns, &block) 670: ds = order(*columns, &block) 671: @opts[:order] ? ds.order_more(*@opts[:order]) : ds 672: end
Qualify to the given table, or first source if no table is given.
DB[:items].where(:id=>1).qualify # SELECT items.* FROM items WHERE (items.id = 1) DB[:items].where(:id=>1).qualify(:i) # SELECT i.* FROM items WHERE (i.id = 1)
# File lib/sequel/dataset/query.rb, line 681 681: def qualify(table=first_source) 682: o = @opts 683: return clone if o[:sql] 684: h = {} 685: (o.keys & QUALIFY_KEYS).each do |k| 686: h[k] = qualified_expression(o[k], table) 687: end 688: h[:select] = [SQL::ColumnAll.new(table)] if !o[:select] || o[:select].empty? 689: clone(h) 690: end
Modify the RETURNING clause, only supported on a few databases. If returning is used, instead of insert returning the autogenerated primary key or update/delete returning the number of modified rows, results are returned using fetch_rows.
DB[:items].returning # RETURNING * DB[:items].returning(nil) # RETURNING NULL DB[:items].returning(:id, :name) # RETURNING id, name
# File lib/sequel/dataset/query.rb, line 700 700: def returning(*values) 701: raise Error, "RETURNING is not supported on #{db.database_type}" unless supports_returning?(:insert) 702: clone(:returning=>values) 703: end
Returns a copy of the dataset with the order reversed. If no order is given, the existing order is inverted.
DB[:items].reverse(:id) # SELECT * FROM items ORDER BY id DESC DB[:items].reverse{foo(bar)} # SELECT * FROM items ORDER BY foo(bar) DESC DB[:items].order(:id).reverse # SELECT * FROM items ORDER BY id DESC DB[:items].order(:id).reverse(Sequel.desc(:name)) # SELECT * FROM items ORDER BY name ASC
# File lib/sequel/dataset/query.rb, line 712 712: def reverse(*order, &block) 713: virtual_row_columns(order, block) 714: order(*invert_order(order.empty? ? @opts[:order] : order)) 715: end
Returns a copy of the dataset with the columns selected changed to the given columns. This also takes a virtual row block, similar to where.
DB[:items].select(:a) # SELECT a FROM items DB[:items].select(:a, :b) # SELECT a, b FROM items DB[:items].select{[a, sum(b)]} # SELECT a, sum(b) FROM items
# File lib/sequel/dataset/query.rb, line 729 729: def select(*columns, &block) 730: virtual_row_columns(columns, block) 731: clone(:select => columns) 732: end
Returns a copy of the dataset selecting the wildcard if no arguments are given. If arguments are given, treat them as tables and select all columns (using the wildcard) from each table.
DB[:items].select(:a).select_all # SELECT * FROM items DB[:items].select_all(:items) # SELECT items.* FROM items DB[:items].select_all(:items, :foo) # SELECT items.*, foo.* FROM items
# File lib/sequel/dataset/query.rb, line 741 741: def select_all(*tables) 742: if tables.empty? 743: clone(:select => nil) 744: else 745: select(*tables.map{|t| i, a = split_alias(t); a || i}.map{|t| SQL::ColumnAll.new(t)}) 746: end 747: end
Returns a copy of the dataset with the given columns added to the existing selected columns. If no columns are currently selected, it will select the columns given in addition to *.
DB[:items].select(:a).select(:b) # SELECT b FROM items DB[:items].select(:a).select_append(:b) # SELECT a, b FROM items DB[:items].select_append(:b) # SELECT *, b FROM items
# File lib/sequel/dataset/query.rb, line 756 756: def select_append(*columns, &block) 757: cur_sel = @opts[:select] 758: if !cur_sel || cur_sel.empty? 759: unless supports_select_all_and_column? 760: return select_all(*(Array(@opts[:from]) + Array(@opts[:join]))).select_more(*columns, &block) 761: end 762: cur_sel = [WILDCARD] 763: end 764: select(*(cur_sel + columns), &block) 765: end
Set both the select and group clauses with the given columns. Column aliases may be supplied, and will be included in the select clause. This also takes a virtual row block similar to where.
DB[:items].select_group(:a, :b) # SELECT a, b FROM items GROUP BY a, b DB[:items].select_group(:c___a){f(c2)} # SELECT c AS a, f(c2) FROM items GROUP BY c, f(c2)
# File lib/sequel/dataset/query.rb, line 776 776: def select_group(*columns, &block) 777: virtual_row_columns(columns, block) 778: select(*columns).group(*columns.map{|c| unaliased_identifier(c)}) 779: end
Alias for select_append.
# File lib/sequel/dataset/query.rb, line 782 782: def select_more(*columns, &block) 783: select_append(*columns, &block) 784: end
Set the server for this dataset to use. Used to pick a specific database shard to run a query against, or to override the default (where SELECT uses :read_only database and all other queries use the :default database). This method is always available but is only useful when database sharding is being used.
DB[:items].all # Uses the :read_only or :default server DB[:items].delete # Uses the :default server DB[:items].server(:blah).delete # Uses the :blah server
# File lib/sequel/dataset/query.rb, line 795 795: def server(servr) 796: clone(:server=>servr) 797: end
If the database uses sharding and the current dataset has not had a server set, return a cloned dataset that uses the given server. Otherwise, return the receiver directly instead of returning a clone.
# File lib/sequel/dataset/query.rb, line 802 802: def server?(server) 803: if db.sharded? && !opts[:server] 804: server(server) 805: else 806: self 807: end 808: end
Unbind bound variables from this dataset's filter and return an array of two objects. The first object is a modified dataset where the filter has been replaced with one that uses bound variable placeholders. The second object is the hash of unbound variables. You can then prepare and execute (or just call) the dataset with the bound variables to get results.
ds, bv = DB[:items].where(:a=>1).unbind
ds # SELECT * FROM items WHERE (a = $a)
bv # {:a => 1}
ds.call(:select, bv)
# File lib/sequel/dataset/query.rb, line 820 820: def unbind 821: u = Unbinder.new 822: ds = clone(:where=>u.transform(opts[:where]), :join=>u.transform(opts[:join])) 823: [ds, u.binds] 824: end
Adds a UNION clause using a second dataset object. A UNION compound dataset returns all rows in either the current dataset or the given dataset. Options:
:alias : | Use the given value as the from_self alias |
:all : | Set to true to use UNION ALL instead of UNION, so duplicate rows can occur |
:from_self : | Set to false to not wrap the returned dataset in a from_self, use with care. |
DB[:items].union(DB[:other_items]) # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS t1 DB[:items].union(DB[:other_items], :all=>true, :from_self=>false) # SELECT * FROM items UNION ALL SELECT * FROM other_items DB[:items].union(DB[:other_items], :alias=>:i) # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS i
# File lib/sequel/dataset/query.rb, line 858 858: def union(dataset, opts=OPTS) 859: compound_clone(:union, dataset, opts) 860: end
Returns a copy of the dataset with the given WHERE conditions imposed upon it.
Accepts the following argument types:
Hash : | list of equality/inclusion expressions |
Array : | depends: |
String : | taken literally |
Symbol : | taken as a boolean column argument (e.g. WHERE active) |
Sequel::SQL::BooleanExpression : | an existing condition expression, probably created using the Sequel expression filter DSL. |
where also accepts a block, which should return one of the above argument types, and is treated the same way. This block yields a virtual row object, which is easy to use to create identifiers and functions. For more details on the virtual row support, see the "Virtual Rows" guide
If both a block and regular argument are provided, they get ANDed together.
Examples:
DB[:items].where(:id => 3) # SELECT * FROM items WHERE (id = 3) DB[:items].where('price < ?', 100) # SELECT * FROM items WHERE price < 100 DB[:items].where([[:id, [1,2,3]], [:id, 0..10]]) # SELECT * FROM items WHERE ((id IN (1, 2, 3)) AND ((id >= 0) AND (id <= 10))) DB[:items].where('price < 100') # SELECT * FROM items WHERE price < 100 DB[:items].where(:active) # SELECT * FROM items WHERE :active DB[:items].where{price < 100} # SELECT * FROM items WHERE (price < 100)
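For example, a hash argument combined with a block, with the two conditions ANDed:
DB[:items].where(:category => 'software'){price < 100} # SELECT * FROM items WHERE ((category = 'software') AND (price < 100))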
Multiple where calls can be chained for scoping:
software = dataset.where(:category => 'software').where{price < 100} # SELECT * FROM items WHERE ((category = 'software') AND (price < 100))
See the "Dataset Filtering" guide for more examples and details.
# File lib/sequel/dataset/query.rb, line 926 926: def where(*cond, &block) 927: _filter(:where, *cond, &block) 928: end
Add a common table expression (CTE) with the given name and a dataset that defines the CTE. A common table expression acts as an inline view for the query. Options:
:args : | Specify the arguments/columns for the CTE, should be an array of symbols. |
:recursive : | Specify that this is a recursive CTE |
DB[:items].with(:items, DB[:syx].where(:name.like('A%'))) # WITH items AS (SELECT * FROM syx WHERE (name LIKE 'A%' ESCAPE '\')) SELECT * FROM items
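A sketch of the :args option, which adds an explicit column list to the CTE (the table and column names here are illustrative):
DB[:items].with(:t, DB[:syx].select(:a, :b), :args=>[:c, :d]) # WITH t(c, d) AS (SELECT a, b FROM syx) SELECT * FROM items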
# File lib/sequel/dataset/query.rb, line 938 938: def with(name, dataset, opts=OPTS) 939: raise(Error, 'This dataset does not support common table expressions') unless supports_cte? 940: if hoist_cte?(dataset) 941: s, ds = hoist_cte(dataset) 942: s.with(name, ds, opts) 943: else 944: clone(:with=>(@opts[:with]||[]) + [opts.merge(:name=>name, :dataset=>dataset)]) 945: end 946: end
Add a recursive common table expression (CTE) with the given name, a dataset that defines the nonrecursive part of the CTE, and a dataset that defines the recursive part of the CTE. Options:
:args : | Specify the arguments/columns for the CTE, should be an array of symbols. |
:union_all : | Set to false to use UNION instead of UNION ALL combining the nonrecursive and recursive parts. |
DB[:t].with_recursive(:t, DB[:i1].select(:id, :parent_id).where(:parent_id=>nil), DB[:i1].join(:t, :id=>:parent_id).select(:i1__id, :i1__parent_id), :args=>[:id, :parent_id]) # WITH RECURSIVE "t"("id", "parent_id") AS ( # SELECT "id", "parent_id" FROM "i1" WHERE ("parent_id" IS NULL) # UNION ALL # SELECT "i1"."id", "i1"."parent_id" FROM "i1" INNER JOIN "t" ON ("t"."id" = "i1"."parent_id") # ) SELECT * FROM "t"
# File lib/sequel/dataset/query.rb, line 964 964: def with_recursive(name, nonrecursive, recursive, opts=OPTS) 965: raise(Error, 'This dataset does not support common table expressions') unless supports_cte? 966: if hoist_cte?(nonrecursive) 967: s, ds = hoist_cte(nonrecursive) 968: s.with_recursive(name, ds, recursive, opts) 969: elsif hoist_cte?(recursive) 970: s, ds = hoist_cte(recursive) 971: s.with_recursive(name, nonrecursive, ds, opts) 972: else 973: clone(:with=>(@opts[:with]||[]) + [opts.merge(:recursive=>true, :name=>name, :dataset=>nonrecursive.union(recursive, {:all=>opts[:union_all] != false, :from_self=>false}))]) 974: end 975: end
Returns a copy of the dataset with the static SQL used. This is useful if you want to keep the same row_proc/graph, but change the SQL used to custom SQL.
DB[:items].with_sql('SELECT * FROM foo') # SELECT * FROM foo
You can use placeholders in your SQL and provide arguments for those placeholders:
DB[:items].with_sql('SELECT ? FROM foo', 1) # SELECT 1 FROM foo
You can also provide a method name and arguments to call to get the SQL:
DB[:items].with_sql(:insert_sql, :b=>1) # INSERT INTO items (b) VALUES (1)
# File lib/sequel/dataset/query.rb, line 989 989: def with_sql(sql, *args) 990: if sql.is_a?(Symbol) 991: sql = send(sql, *args) 992: else 993: sql = SQL::PlaceholderLiteralString.new(sql, args) unless args.empty? 994: end 995: clone(:sql=>sql) 996: end
Add the dataset to the list of compounds
# File lib/sequel/dataset/query.rb, line 1001 1001: def compound_clone(type, dataset, opts) 1002: if hoist_cte?(dataset) 1003: s, ds = hoist_cte(dataset) 1004: return s.compound_clone(type, ds, opts) 1005: end 1006: ds = compound_from_self.clone(:compounds=>Array(@opts[:compounds]).map{|x| x.dup} + [[type, dataset.compound_from_self, opts[:all]]]) 1007: opts[:from_self] == false ? ds : ds.from_self(opts) 1008: end
Return true if the dataset has a non-nil value for any key in opts.
# File lib/sequel/dataset/query.rb, line 1011 1011: def options_overlap(opts) 1012: !(@opts.collect{|k,v| k unless v.nil?}.compact & opts).empty? 1013: end
Whether this dataset is a simple select from an underlying table, such as:
SELECT * FROM table SELECT table.* FROM table
# File lib/sequel/dataset/query.rb, line 1019 1019: def simple_select_all? 1020: o = @opts.reject{|k,v| v.nil? || NON_SQL_OPTIONS.include?(k)} 1021: if (f = o[:from]) && f.length == 1 && (f.first.is_a?(Symbol) || f.first.is_a?(SQL::AliasedExpression)) 1022: case o.length 1023: when 1 1024: true 1025: when 2 1026: (s = o[:select]) && s.length == 1 && s.first.is_a?(SQL::ColumnAll) 1027: else 1028: false 1029: end 1030: else 1031: false 1032: end 1033: end
These methods deal with prepared statements and bound variables. On some adapters they use native prepared statements and bound variables; on others, support is emulated. For details, see the "Prepared Statements/Bound Variables" guide.
PREPARED_ARG_PLACEHOLDER | = | LiteralString.new('?').freeze |
Set the bind variables to use for the call. If bind variables have already been set for this dataset, they are updated with the contents of bind_vars.
DB[:table].filter(:id=>:$id).bind(:id=>1).call(:first) # SELECT * FROM table WHERE id = ? LIMIT 1 -- (1) # => {:id=>1}
# File lib/sequel/dataset/prepared_statements.rb, line 223 223: def bind(bind_vars={}) 224: clone(:bind_vars=>@opts[:bind_vars] ? @opts[:bind_vars].merge(bind_vars) : bind_vars) 225: end
For the given type (:select, :first, :insert, :insert_select, :update, or :delete), run the sql with the bind variables specified in the hash. values is a hash passed to insert or update (if one of those types is used), which may contain placeholders.
DB[:table].filter(:id=>:$id).call(:first, :id=>1) # SELECT * FROM table WHERE id = ? LIMIT 1 -- (1) # => {:id=>1}
# File lib/sequel/dataset/prepared_statements.rb, line 234 234: def call(type, bind_variables={}, *values, &block) 235: prepare(type, nil, *values).call(bind_variables, &block) 236: end
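A sketch of an update call that combines bind variables with a values hash containing a placeholder (the table and column names are illustrative):
DB[:table].filter(:id=>:$i).call(:update, {:i=>1, :new_name=>'Jim'}, :name=>:$new_name) # UPDATE table SET name = ? WHERE (id = ?) -- ('Jim', 1)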
Prepare an SQL statement for later execution. Takes a type similar to call, and the name symbol of the prepared statement. While name defaults to nil, it should always be provided as a symbol for the name of the prepared statement, as some databases require that prepared statements have names.
This returns a clone of the dataset extended with PreparedStatementMethods, which you can call with the hash of bind variables to use. The prepared statement is also stored in the associated database, where it can be called by name. The following usage is identical:
ps = DB[:table].filter(:name=>:$name).prepare(:first, :select_by_name) ps.call(:name=>'Blah') # SELECT * FROM table WHERE name = ? -- ('Blah') # => {:id=>1, :name=>'Blah'} DB.call(:select_by_name, :name=>'Blah') # Same thing
# File lib/sequel/dataset/prepared_statements.rb, line 256 256: def prepare(type, name=nil, *values) 257: ps = to_prepared_statement(type, values) 258: db.set_prepared_statement(name, ps) if name 259: ps 260: end
Return a cloned copy of the current dataset extended with PreparedStatementMethods, setting the type and modify values.
# File lib/sequel/dataset/prepared_statements.rb, line 266 266: def to_prepared_statement(type, values=nil) 267: ps = bind 268: ps.extend(PreparedStatementMethods) 269: ps.orig_dataset = self 270: ps.prepared_type = type 271: ps.prepared_modify_values = values 272: ps 273: end
MUTATION_METHODS | = | QUERY_METHODS - [:naked, :from_self] | All methods that should have a ! method added that modifies the receiver. |
Setup mutation (e.g. filter!) methods. These operate the same as the non-! methods, but replace the options of the current dataset with the options of the resulting dataset.
Do not call this method with untrusted input, as that can result in arbitrary code execution.
# File lib/sequel/dataset/mutation.rb, line 17 17: def self.def_mutation_method(*meths) 18: options = meths.pop if meths.last.is_a?(Hash) 19: mod = options[:module] if options 20: mod ||= self 21: meths.each do |meth| 22: mod.class_eval("def #{meth}!(*args, &block); mutation_method(:#{meth}, *args, &block) end", __FILE__, __LINE__) 23: end 24: end
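A hypothetical sketch, assuming a MyDatasetMethods module that already defines a my_scope query method; this adds a matching my_scope! mutation method to that module:
Sequel::Dataset.def_mutation_method(:my_scope, :module=>MyDatasetMethods)
ds.my_scope!(:active) # same result as ds = ds.my_scope(:active), but replaces the options of ds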
Load an extension into the receiver. In addition to requiring the extension file, this also modifies the dataset to work with the extension (usually extending it with a module defined in the extension file). If no related extension file exists or the extension does not have specific support for Dataset objects, an Error will be raised. Returns self.
# File lib/sequel/dataset/mutation.rb, line 38 38: def extension!(*exts) 39: raise_if_frozen! 40: Sequel.extension(*exts) 41: exts.each do |ext| 42: if pr = Sequel.synchronize{EXTENSIONS[ext]} 43: pr.call(self) 44: else 45: raise(Error, "Extension #{ext} does not have specific support handling individual datasets") 46: end 47: end 48: self 49: end
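For example, loading one of the dataset extensions shipped with Sequel into the receiver in place:
ds = DB[:items]
ds.extension!(:columns_introspection) # requires the extension and modifies ds itself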
Avoid self-referential dataset by cloning.
# File lib/sequel/dataset/mutation.rb, line 52 52: def from_self!(*args, &block) 53: raise_if_frozen! 54: @opts = clone.from_self(*args, &block).opts 55: self 56: end
Set whether to quote identifiers for this dataset
# File lib/sequel/dataset/mutation.rb, line 79 79: def quote_identifiers=(v) 80: raise_if_frozen! 81: skip_symbol_cache! 82: @quote_identifiers = v 83: end
These methods don't fit cleanly into another section.
NOTIMPL_MSG | = | "This method must be overridden in Sequel adapters".freeze |
ARRAY_ACCESS_ERROR_MSG | = | 'You cannot call Dataset#[] with an integer or with no arguments.'.freeze |
ARG_BLOCK_ERROR_MSG | = | 'Must use either an argument or a block, not both'.freeze |
IMPORT_ERROR_MSG | = | 'Using Sequel::Dataset#import an empty column array is not allowed'.freeze |
Constructs a new Dataset instance with an associated database and options. Datasets are usually constructed by invoking the Database#[] method:
DB[:posts]
Sequel::Dataset is an abstract class that is not useful by itself. Each database adapter provides a subclass of Sequel::Dataset, and has the Database#dataset method return an instance of that subclass.
# File lib/sequel/dataset/misc.rb, line 28 28: def initialize(db) 29: @db = db 30: @opts = OPTS 31: end
Yield a dataset for each server in the connection pool that is tied to that server. Intended for use in sharded environments where all servers need to be modified with the same data:
DB[:configs].where(:key=>'setting').each_server{|ds| ds.update(:value=>'new_value')}
# File lib/sequel/dataset/misc.rb, line 62 62: def each_server 63: db.servers.each{|s| yield server(s)} 64: end
Returns the string with the LIKE metacharacters (% and _) escaped. Useful for when the LIKE term is a user-provided string where metacharacters should not be recognized. Example:
ds.escape_like("foo\\%_") # 'foo\\\%\_'
# File lib/sequel/dataset/misc.rb, line 71 71: def escape_like(string) 72: string.gsub(/[\\%_]/){|m| "\\#{m}"} 73: end
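A sketch of combining it with a LIKE search; user_input is an assumption standing in for an untrusted string:
term = ds.escape_like(user_input)
ds.where(Sequel.like(:name, "%#{term}%")) # the term is matched literally, % and _ in it have no special meaning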
Alias of first_source_alias
# File lib/sequel/dataset/misc.rb, line 87 87: def first_source 88: first_source_alias 89: end
The first source (primary table) for this dataset. If the dataset doesn't have a table, raises an Error. If the table is aliased, returns the aliased name.
DB[:table].first_source_alias # => :table DB[:table___t].first_source_alias # => :t
# File lib/sequel/dataset/misc.rb, line 99 99: def first_source_alias 100: source = @opts[:from] 101: if source.nil? || source.empty? 102: raise Error, 'No source specified for query' 103: end 104: case s = source.first 105: when SQL::AliasedExpression 106: s.alias 107: when Symbol 108: _, _, aliaz = split_symbol(s) 109: aliaz ? aliaz.to_sym : s 110: else 111: s 112: end 113: end
The first source (primary table) for this dataset. If the dataset doesn't have a table, raises an Error. If the table is aliased, returns the original table, not the alias.
DB[:table].first_source_table # => :table DB[:table___t].first_source_table # => :table
# File lib/sequel/dataset/misc.rb, line 124 124: def first_source_table 125: source = @opts[:from] 126: if source.nil? || source.empty? 127: raise Error, 'No source specified for query' 128: end 129: case s = source.first 130: when SQL::AliasedExpression 131: s.expression 132: when Symbol 133: sch, table, aliaz = split_symbol(s) 134: aliaz ? (sch ? SQL::QualifiedIdentifier.new(sch, table) : table.to_sym) : s 135: else 136: s 137: end 138: end
Sets the frozen flag on the dataset, so you can't modify it. Returns the receiver.
# File lib/sequel/dataset/misc.rb, line 76 76: def freeze 77: @opts[:frozen] = true 78: self 79: end
Whether the object is frozen.
# File lib/sequel/dataset/misc.rb, line 82 82: def frozen? 83: @opts[:frozen] 84: end
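A small sketch, assuming the mutation methods described above are available:
ds = DB[:table].freeze
ds.frozen?        # => true
ds.where!(:a=>1)  # raises, as the frozen dataset can no longer be mutated in place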
The String instance method to call on identifiers before sending them to the database.
# File lib/sequel/dataset/misc.rb, line 148 148: def identifier_input_method 149: if defined?(@identifier_input_method) 150: @identifier_input_method 151: else 152: @identifier_input_method = db.identifier_input_method 153: end 154: end
The String instance method to call on identifiers returned from the database.
# File lib/sequel/dataset/misc.rb, line 158 158: def identifier_output_method 159: if defined?(@identifier_output_method) 160: @identifier_output_method 161: else 162: @identifier_output_method = db.identifier_output_method 163: end 164: end
Splits a possible implicit alias in c, handling both SQL::AliasedExpressions and Symbols. Returns an array of two elements, with the first being the main expression, and the second being the alias.
# File lib/sequel/dataset/misc.rb, line 186 186: def split_alias(c) 187: case c 188: when Symbol 189: c_table, column, aliaz = split_symbol(c) 190: [c_table ? SQL::QualifiedIdentifier.new(c_table, column.to_sym) : column.to_sym, aliaz] 191: when SQL::AliasedExpression 192: [c.expression, c.alias] 193: when SQL::JoinClause 194: [c.table, c.table_alias] 195: else 196: [c, nil] 197: end 198: end
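Illustrative return values:
split_alias(:table___t)            # => [:table, :t]
split_alias(Sequel.as(:table, :t)) # => [:table, :t]
split_alias(:column)               # => [:column, nil]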
This returns an SQL::Identifier or SQL::AliasedExpression containing an SQL identifier that represents the unqualified column for the given value. The given value should be a Symbol, SQL::Identifier, SQL::QualifiedIdentifier, or SQL::AliasedExpression containing one of those. In other cases, this returns nil
# File lib/sequel/dataset/misc.rb, line 205 205: def unqualified_column_for(v) 206: unless v.is_a?(String) 207: _unqualified_column_for(v) 208: end 209: end
Creates a unique table alias that hasn't already been used in the dataset. table_alias can be any type of object accepted by alias_symbol. The symbol returned will be the implicit alias in the argument, possibly appended with "_N" if the implicit alias has already been used, where N is an integer starting at 0 and increasing until an unused one is found.
You can provide a second addition array argument containing symbols that should not be considered valid table aliases. The current aliases for the FROM and JOIN tables are automatically included in this array.
DB[:table].unused_table_alias(:t) # => :t DB[:table].unused_table_alias(:table) # => :table_0 DB[:table, :table_0].unused_table_alias(:table) # => :table_1 DB[:table, :table_0].unused_table_alias(:table, [:table_1, :table_2]) # => :table_3
# File lib/sequel/dataset/misc.rb, line 233 233: def unused_table_alias(table_alias, used_aliases = []) 234: table_alias = alias_symbol(table_alias) 235: used_aliases += opts[:from].map{|t| alias_symbol(t)} if opts[:from] 236: used_aliases += opts[:join].map{|j| j.table_alias ? alias_alias_symbol(j.table_alias) : alias_symbol(j.table)} if opts[:join] 237: if used_aliases.include?(table_alias) 238: i = 0 239: loop do 240: ta = :"#{table_alias}_#{i}" 241: return ta unless used_aliases.include?(ta) 242: i += 1 243: end 244: else 245: table_alias 246: end 247: end
Dataset graphing changes the dataset to yield hashes where keys are table name symbols and values are hashes representing the columns related to that table. All of these methods return modified copies of the receiver.
Adds the given graph aliases to the list of graph aliases to use, unlike set_graph_aliases, which replaces the list (the equivalent of select_more when graphing). See set_graph_aliases.
DB[:table].add_graph_aliases(:some_alias=>[:table, :column]) # SELECT ..., table.column AS some_alias
# File lib/sequel/dataset/graph.rb, line 16 16: def add_graph_aliases(graph_aliases) 17: unless (ga = opts[:graph_aliases]) || (opts[:graph] && (ga = opts[:graph][:column_aliases])) 18: raise Error, "cannot call add_graph_aliases on a dataset that has not been called with graph or set_graph_aliases" 19: end 20: columns, graph_aliases = graph_alias_columns(graph_aliases) 21: select_more(*columns).clone(:graph_aliases => ga.merge(graph_aliases)) 22: end
Similar to Dataset#join_table, but uses unambiguous aliases for selected columns and keeps metadata about the aliases for use in other methods.
Arguments:
dataset : | Can be a symbol (specifying a table), another dataset, or an SQL::Identifier, SQL::QualifiedIdentifier, or SQL::AliasedExpression. |
join_conditions : | Any condition(s) allowed by join_table. |
block : | A block that is passed to join_table. |
Options:
:from_self_alias : | The alias to use when the receiver is not a graphed dataset but it contains multiple FROM tables or a JOIN. In this case, the receiver is wrapped in a from_self before graphing, and this option determines the alias to use. |
:implicit_qualifier : | The qualifier of implicit conditions, see join_table. |
:join_only : | Only join the tables, do not change the selected columns. |
:join_type : | The type of join to use (passed to join_table). Defaults to :left_outer. |
:qualify : | The type of qualification to do, see join_table. |
:select : | An array of columns to select. When not used, selects all columns in the given dataset. When set to false, selects no columns and is like simply joining the tables, though graph keeps some metadata about the join that makes it important to use graph instead of join_table. |
:table_alias : | The alias to use for the table. If not specified, doesn't alias the table. You will get an error if the alias (or table) name is used more than once. |
# File lib/sequel/dataset/graph.rb, line 50 50: def graph(dataset, join_conditions = nil, options = OPTS, &block) 51: # Allow the use of a dataset or symbol as the first argument 52: # Find the table name/dataset based on the argument 53: table_alias = options[:table_alias] 54: table = dataset 55: create_dataset = true 56: 57: case dataset 58: when Symbol 59: # let alias be the same as the table name (sans any optional schema) 60: # unless alias explicitly given in the symbol using ___ notation 61: table_alias ||= split_symbol(table).compact.last 62: when Dataset 63: if dataset.simple_select_all? 64: table = dataset.opts[:from].first 65: table_alias ||= table 66: else 67: table_alias ||= dataset_alias((@opts[:num_dataset_sources] || 0)+1) 68: end 69: create_dataset = false 70: when SQL::Identifier 71: table_alias ||= table.value 72: when SQL::QualifiedIdentifier 73: table_alias ||= split_qualifiers(table).last 74: when SQL::AliasedExpression 75: return graph(table.expression, join_conditions, {:table_alias=>table.alias}.merge(options), &block) 76: else 77: raise Error, "The dataset argument should be a symbol or dataset" 78: end 79: table_alias = table_alias.to_sym 80: 81: if create_dataset 82: dataset = db.from(table) 83: end 84: 85: # Raise Sequel::Error with explanation that the table alias has been used 86: raise_alias_error = lambda do 87: raise(Error, "this #{options[:table_alias] ? 'alias' : 'table'} has already been been used, please specify " \ 88: "#{options[:table_alias] ? 'a different alias' : 'an alias via the :table_alias option'}") 89: end 90: 91: # Only allow table aliases that haven't been used 92: raise_alias_error.call if @opts[:graph] && @opts[:graph][:table_aliases] && @opts[:graph][:table_aliases].include?(table_alias) 93: 94: table_alias_qualifier = qualifier_from_alias_symbol(table_alias, table) 95: implicit_qualifier = options[:implicit_qualifier] 96: ds = self 97: 98: # Use a from_self if this is already a joined table (or from_self specifically disabled for graphs) 99: if (@opts[:graph_from_self] != false && !@opts[:graph] && joined_dataset?) 100: from_selfed = true 101: implicit_qualifier = options[:from_self_alias] || first_source 102: ds = ds.from_self(:alias=>implicit_qualifier) 103: end 104: 105: # Join the table early in order to avoid cloning the dataset twice 106: ds = ds.join_table(options[:join_type] || :left_outer, table, join_conditions, :table_alias=>table_alias_qualifier, :implicit_qualifier=>implicit_qualifier, :qualify=>options[:qualify], &block) 107: 108: return ds if options[:join_only] 109: 110: opts = ds.opts 111: 112: # Whether to include the table in the result set 113: add_table = options[:select] == false ? 
false : true 114: # Whether to add the columns to the list of column aliases 115: add_columns = !ds.opts.include?(:graph_aliases) 116: 117: if graph = opts[:graph] 118: opts[:graph] = graph = graph.dup 119: select = opts[:select].dup 120: [:column_aliases, :table_aliases, :column_alias_num].each{|k| graph[k] = graph[k].dup} 121: else 122: # Setup the initial graph data structure if it doesn't exist 123: qualifier = ds.first_source_alias 124: master = alias_symbol(qualifier) 125: raise_alias_error.call if master == table_alias 126: 127: # Master hash storing all .graph related information 128: graph = opts[:graph] = {} 129: 130: # Associates column aliases back to tables and columns 131: column_aliases = graph[:column_aliases] = {} 132: 133: # Associates table alias (the master is never aliased) 134: table_aliases = graph[:table_aliases] = {master=>self} 135: 136: # Keep track of the alias numbers used 137: ca_num = graph[:column_alias_num] = Hash.new(0) 138: 139: # All columns in the master table are never 140: # aliased, but are not included if set_graph_aliases 141: # has been used. 142: if add_columns 143: if (select = @opts[:select]) && !select.empty? && !(select.length == 1 && (select.first.is_a?(SQL::ColumnAll))) 144: select = select.map do |sel| 145: raise Error, "can't figure out alias to use for graphing for #{sel.inspect}" unless column = _hash_key_symbol(sel) 146: column_aliases[column] = [master, column] 147: if from_selfed 148: # Initial dataset was wrapped in subselect, selected all 149: # columns in the subselect, qualified by the subselect alias. 150: Sequel.qualify(qualifier, Sequel.identifier(column)) 151: else 152: # Initial dataset not wrapped in subslect, just make 153: # sure columns are qualified in some way. 154: qualified_expression(sel, qualifier) 155: end 156: end 157: else 158: select = columns.map do |column| 159: column_aliases[column] = [master, column] 160: SQL::QualifiedIdentifier.new(qualifier, column) 161: end 162: end 163: end 164: end 165: 166: # Add the table alias to the list of aliases 167: # Even if it isn't been used in the result set, 168: # we add a key for it with a nil value so we can check if it 169: # is used more than once 170: table_aliases = graph[:table_aliases] 171: table_aliases[table_alias] = add_table ? dataset : nil 172: 173: # Add the columns to the selection unless we are ignoring them 174: if add_table && add_columns 175: column_aliases = graph[:column_aliases] 176: ca_num = graph[:column_alias_num] 177: # Which columns to add to the result set 178: cols = options[:select] || dataset.columns 179: # If the column hasn't been used yet, don't alias it. 180: # If it has been used, try table_column. 181: # If that has been used, try table_column_N 182: # using the next value of N that we know hasn't been 183: # used 184: cols.each do |column| 185: col_alias, identifier = if column_aliases[column] 186: column_alias = "#{table_alias}_#{column}""#{table_alias}_#{column}" 187: if column_aliases[column_alias] 188: column_alias_num = ca_num[column_alias] 189: column_alias = "#{column_alias}_#{column_alias_num}""#{column_alias}_#{column_alias_num}" 190: ca_num[column_alias] += 1 191: end 192: [column_alias, SQL::AliasedExpression.new(SQL::QualifiedIdentifier.new(table_alias_qualifier, column), column_alias)] 193: else 194: ident = SQL::QualifiedIdentifier.new(table_alias_qualifier, column) 195: [column, ident] 196: end 197: column_aliases[col_alias] = [table_alias, column] 198: select.push(identifier) 199: end 200: end 201: add_columns ? 
ds.select(*select) : ds 202: end
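An illustrative sketch of what a graphed dataset yields; the artists/albums tables and the exact aliases are assumptions:
DB[:artists].graph(:albums, :artist_id=>:id).first
# SELECT artists.id, artists.name, albums.id AS albums_id, albums.artist_id, albums.name AS albums_name
#   FROM artists LEFT OUTER JOIN albums ON (albums.artist_id = artists.id) LIMIT 1
# => {:artists=>{:id=>1, :name=>'YJM'}, :albums=>{:id=>1, :artist_id=>1, :name=>'RF'}}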
This allows you to manually specify the graph aliases to use when using graph. You can use it to only select certain columns, and have those columns mapped to specific aliases in the result set. This is the equivalent of select for a graphed dataset, and must be used instead of select whenever graphing is used.
graph_aliases : | Should be a hash with keys being symbols of column aliases, and values being either symbols or arrays with one to three elements. If the value is a symbol, it is assumed to be the same as a one element array containing that symbol. The first element of the array should be the table alias symbol. The second should be the actual column name symbol. If the array only has a single element the column name symbol will be assumed to be the same as the corresponding hash key. If the array has a third element, it is used as the value returned, instead of table_alias.column_name. |
DB[:artists].graph(:albums, :artist_id=>:id). set_graph_aliases(:name=>:artists, :album_name=>[:albums, :name], :forty_two=>[:albums, :fourtwo, 42]).first # SELECT artists.name, albums.name AS album_name, 42 AS forty_two ...
# File lib/sequel/dataset/graph.rb, line 227 227: def set_graph_aliases(graph_aliases) 228: columns, graph_aliases = graph_alias_columns(graph_aliases) 229: ds = select(*columns) 230: ds.opts[:graph_aliases] = graph_aliases 231: ds 232: end
These methods all return booleans, with most describing whether or not the dataset supports a feature.
Whether this dataset quotes identifiers.
# File lib/sequel/dataset/features.rb, line 10 10: def quote_identifiers? 11: if defined?(@quote_identifiers) 12: @quote_identifiers 13: else 14: @quote_identifiers = db.quote_identifiers? 15: end 16: end
Whether you must use a column alias list for recursive CTEs (false by default).
# File lib/sequel/dataset/features.rb, line 27 27: def recursive_cte_requires_column_aliases? 28: false 29: end
Whether type specifiers are required for prepared statement/bound variable argument placeholders (i.e. :bv__integer)
# File lib/sequel/dataset/features.rb, line 39 39: def requires_placeholder_type_specifiers? 40: false 41: end
Whether the dataset supports common table expressions (the WITH clause). If given, type can be :select, :insert, :update, or :delete, in which case it determines whether WITH is supported for the respective statement type.
# File lib/sequel/dataset/features.rb, line 46 46: def supports_cte?(type=:select) 47: false 48: end
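A minimal sketch of guarding WITH usage behind this check:
ds = DB[:items]
ds = ds.with(:recent_items, DB[:items].where(:recent=>true)) if ds.supports_cte?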
Whether the dataset supports common table expressions (the WITH clause) in subqueries. If false, applies the WITH clause to the main query, which can cause issues if multiple WITH clauses use the same name.
# File lib/sequel/dataset/features.rb, line 53 53: def supports_cte_in_subqueries? 54: false 55: end
Whether the database supports derived column lists (e.g. "table_expr AS table_alias(column_alias1, column_alias2, …)"), true by default.
# File lib/sequel/dataset/features.rb, line 60 60: def supports_derived_column_lists? 61: true 62: end
Whether the dataset supports the IS TRUE syntax.
# File lib/sequel/dataset/features.rb, line 96 96: def supports_is_true? 97: true 98: end
Whether the dataset supports the JOIN table USING (column1, …) syntax.
# File lib/sequel/dataset/features.rb, line 101 101: def supports_join_using? 102: true 103: end
Whether limits are supported in correlated subqueries. True by default.
# File lib/sequel/dataset/features.rb, line 111 111: def supports_limits_in_correlated_subqueries? 112: true 113: end
Whether modifying joined datasets is supported.
# File lib/sequel/dataset/features.rb, line 116 116: def supports_modifying_joins? 117: false 118: end
Whether offsets are supported in correlated subqueries, true by default.
# File lib/sequel/dataset/features.rb, line 127 127: def supports_offsets_in_correlated_subqueries? 128: true 129: end
Whether the dataset supports pattern matching by regular expressions.
# File lib/sequel/dataset/features.rb, line 138 138: def supports_regexp? 139: false 140: end
Whether the dataset supports REPLACE syntax, false by default.
# File lib/sequel/dataset/features.rb, line 143 143: def supports_replace? 144: false 145: end
Whether the database supports SELECT *, column FROM table
# File lib/sequel/dataset/features.rb, line 154 154: def supports_select_all_and_column? 155: true 156: end
Whether the dataset supports timezones in literal timestamps
# File lib/sequel/dataset/features.rb, line 159 159: def supports_timestamp_timezones? 160: false 161: end
Whether the dataset supports fractional seconds in literal timestamps
# File lib/sequel/dataset/features.rb, line 164 164: def supports_timestamp_usecs? 165: true 166: end
These methods all execute the dataset's SQL on the database. They don't return modified datasets, so if used in a method chain they should be the last method called.
ACTION_METHODS | = | (<<-METHS).split.map{|x| x.to_sym} << [] all avg count columns columns! delete each empty? fetch_rows first first! get import insert interval last map max min multi_insert paged_each range select_hash select_hash_groups select_map select_order_map single_record single_value sum to_hash to_hash_groups truncate update METHS | Action methods defined by Sequel that execute code on the database. |
OPTS | = | Sequel::OPTS | ||
DatasetClass | = | self | ||
PREPARED_ARG_PLACEHOLDER | = | ':'.freeze | ||
Inserts the given argument into the database. Returns self so it can be used safely when chaining:
DB[:items] << {:id=>0, :name=>'Zero'} << DB[:old_items].select(:id, :name)
# File lib/sequel/dataset/actions.rb, line 23 23: def <<(arg) 24: insert(arg) 25: self 26: end
Returns the first record matching the conditions. Examples:
DB[:table][:id=>1] # SELECT * FROM table WHERE (id = 1) LIMIT 1 # => {:id=>1}
# File lib/sequel/dataset/actions.rb, line 32 32: def [](*conditions) 33: raise(Error, ARRAY_ACCESS_ERROR_MSG) if (conditions.length == 1 and conditions.first.is_a?(Integer)) or conditions.length == 0 34: first(*conditions) 35: end
Returns an array with all records in the dataset. If a block is given, the array is iterated over after all items have been loaded.
DB[:table].all # SELECT * FROM table # => [{:id=>1, ...}, {:id=>2, ...}, ...] # Iterate over all rows in the table DB[:table].all{|row| p row}
# File lib/sequel/dataset/actions.rb, line 45 45: def all(&block) 46: _all(block){|a| each{|r| a << r}} 47: end
Returns the average value for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].avg(:number) # SELECT avg(number) FROM table LIMIT 1 # => 3 DB[:table].avg{function(column)} # SELECT avg(function(column)) FROM table LIMIT 1 # => 1
# File lib/sequel/dataset/actions.rb, line 56 56: def avg(column=Sequel.virtual_row(&Proc.new)) 57: aggregate_dataset.get{avg(column).as(:avg)} 58: end
Returns the columns in the result set in order as an array of symbols. If the columns are currently cached, returns the cached value. Otherwise, a SELECT query is performed to retrieve a single row in order to get the columns.
If you are looking for all columns for a single table and maybe some information about each column (e.g. database type), see Database#schema.
DB[:table].columns # => [:id, :name]
# File lib/sequel/dataset/actions.rb, line 69 69: def columns 70: return @columns if @columns 71: ds = unfiltered.unordered.naked.clone(:distinct => nil, :limit => 1, :offset=>nil) 72: ds.each{break} 73: @columns = ds.instance_variable_get(:@columns) 74: @columns || [] 75: end
Returns the number of records in the dataset. If an argument is provided, it is used as the argument to count. If a block is provided, it is treated as a virtual row, and the result is used as the argument to count.
DB[:table].count # SELECT count(*) AS count FROM table LIMIT 1 # => 3 DB[:table].count(:column) # SELECT count(column) AS count FROM table LIMIT 1 # => 2 DB[:table].count{foo(column)} # SELECT count(foo(column)) AS count FROM table LIMIT 1 # => 1
# File lib/sequel/dataset/actions.rb, line 98 98: def count(arg=(no_arg=true), &block) 99: if no_arg 100: if block 101: arg = Sequel.virtual_row(&block) 102: aggregate_dataset.get{count(arg).as(:count)} 103: else 104: aggregate_dataset.get{count{}.*.as(:count)}.to_i 105: end 106: elsif block 107: raise Error, 'cannot provide both argument and block to Dataset#count' 108: else 109: aggregate_dataset.get{count(arg).as(:count)} 110: end 111: end
Deletes the records in the dataset. The returned value should be number of records deleted, but that is adapter dependent.
DB[:table].delete # DELETE FROM table # => 3
# File lib/sequel/dataset/actions.rb, line 118 118: def delete(&block) 119: sql = delete_sql 120: if uses_returning?(:delete) 121: returning_fetch_rows(sql, &block) 122: else 123: execute_dui(sql) 124: end 125: end
Iterates over the records in the dataset as they are yielded from the database adapter, and returns self.
DB[:table].each{|row| p row} # SELECT * FROM table
Note that this method is not safe to use on many adapters if you are running additional queries inside the provided block. If you are running queries inside the block, you should use all instead of each for the outer queries, or use a separate thread or shard inside each.
# File lib/sequel/dataset/actions.rb, line 136 136: def each 137: if row_proc = @row_proc 138: fetch_rows(select_sql){|r| yield row_proc.call(r)} 139: else 140: fetch_rows(select_sql){|r| yield r} 141: end 142: self 143: end
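A hypothetical sketch of the safer pattern (the albums/tracks tables are assumptions): load the outer rows with all, then issue the per-row queries:
DB[:albums].all do |album|
  DB[:tracks].where(:album_id=>album[:id]).update(:archived=>true)
end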
Returns true if no records exist in the dataset, false otherwise
DB[:table].empty? # SELECT 1 AS one FROM table LIMIT 1 # => false
# File lib/sequel/dataset/actions.rb, line 149 149: def empty? 150: ds = @opts[:order] ? unordered : self 151: ds.get(Sequel::SQL::AliasedExpression.new(1, :one)).nil? 152: end
Yield all rows matching this dataset. If the dataset is set to split multiple statements, yield arrays of hashes one per statement instead of yielding results for all statements as hashes.
# File lib/sequel/adapters/mysql.rb, line 295 295: def fetch_rows(sql) 296: execute(sql) do |r| 297: i = -1 298: cps = db.conversion_procs 299: cols = r.fetch_fields.map do |f| 300: # Pretend tinyint is another integer type if its length is not 1, to 301: # avoid casting to boolean if Sequel::MySQL.convert_tinyint_to_bool 302: # is set. 303: type_proc = f.type == 1 && cast_tinyint_integer?(f) ? cps[2] : cps[f.type] 304: [output_identifier(f.name), type_proc, i+=1] 305: end 306: @columns = cols.map{|c| c.first} 307: if opts[:split_multiple_result_sets] 308: s = [] 309: yield_rows(r, cols){|h| s << h} 310: yield s 311: else 312: yield_rows(r, cols){|h| yield h} 313: end 314: end 315: self 316: end
Set the columns and yield the hashes to the block.
# File lib/sequel/adapters/swift.rb, line 132 132: def fetch_rows(sql) 133: execute(sql) do |res| 134: col_map = {} 135: @columns = res.fields.map do |c| 136: col_map[c] = output_identifier(c) 137: end 138: tz = db.timezone if Sequel.application_timezone 139: res.each do |r| 140: h = {} 141: r.each do |k, v| 142: h[col_map[k]] = case v 143: when StringIO 144: SQL::Blob.new(v.read) 145: when DateTime 146: tz ? Sequel.database_to_application_timestamp(Sequel.send(:convert_input_datetime_no_offset, v, tz)) : v 147: else 148: v 149: end 150: end 151: yield h 152: end 153: end 154: self 155: end
Yield a hash for each row in the dataset.
# File lib/sequel/adapters/sqlite.rb, line 361 361: def fetch_rows(sql) 362: execute(sql) do |result| 363: i = -1 364: cps = db.conversion_procs 365: type_procs = result.types.map{|t| cps[base_type_name(t)]} 366: cols = result.columns.map{|c| i+=1; [output_identifier(c), i, type_procs[i]]} 367: @columns = cols.map{|c| c.first} 368: result.each do |values| 369: row = {} 370: cols.each do |name,id,type_proc| 371: v = values[id] 372: if type_proc && v 373: v = type_proc.call(v) 374: end 375: row[name] = v 376: end 377: yield row 378: end 379: end 380: end
Yield all rows matching this dataset. If the dataset is set to split multiple statements, yield arrays of hashes one per statement instead of yielding results for all statements as hashes.
# File lib/sequel/adapters/sqlanywhere.rb, line 143 143: def fetch_rows(sql) 144: db = @db 145: cps = db.conversion_procs 146: api = db.api 147: execute(sql) do |rs| 148: convert = (convert_smallint_to_bool and db.convert_smallint_to_bool) 149: col_infos = [] 150: api.sqlany_num_cols(rs).times do |i| 151: _, _, name, _, type = api.sqlany_get_column_info(rs, i) 152: cp = if type == 500 153: cps[500] if convert 154: else 155: cps[type] 156: end 157: col_infos << [i, output_identifier(name), cp] 158: end 159: 160: @columns = col_infos.map{|a| a[1]} 161: 162: if rs 163: while api.sqlany_fetch_next(rs) == 1 164: h = {} 165: col_infos.each do |i, name, cp| 166: _, v = api.sqlany_get_column(rs, i) 167: h[name] = cp && v ? cp[v] : v 168: end 169: yield h 170: end 171: end 172: end 173: self 174: end
If an integer argument is given, it is interpreted as a limit, and then returns all matching records up to that limit. If no argument is passed, it returns the first matching record. If any other type of argument(s) is passed, it is given to filter and the first matching record is returned. If a block is given, it is used to filter the dataset before returning anything.
If there are no records in the dataset, returns nil (or an empty array if an integer argument is given).
Examples:
DB[:table].first # SELECT * FROM table LIMIT 1 # => {:id=>7} DB[:table].first(2) # SELECT * FROM table LIMIT 2 # => [{:id=>6}, {:id=>4}] DB[:table].first(:id=>2) # SELECT * FROM table WHERE (id = 2) LIMIT 1 # => {:id=>2} DB[:table].first("id = 3") # SELECT * FROM table WHERE (id = 3) LIMIT 1 # => {:id=>3} DB[:table].first("id = ?", 4) # SELECT * FROM table WHERE (id = 4) LIMIT 1 # => {:id=>4} DB[:table].first{id > 2} # SELECT * FROM table WHERE (id > 2) LIMIT 1 # => {:id=>5} DB[:table].first("id > ?", 4){id < 6} # SELECT * FROM table WHERE ((id > 4) AND (id < 6)) LIMIT 1 # => {:id=>5} DB[:table].first(2){id < 2} # SELECT * FROM table WHERE (id < 2) LIMIT 2 # => [{:id=>1}]
# File lib/sequel/dataset/actions.rb, line 189 189: def first(*args, &block) 190: ds = block ? filter(&block) : self 191: 192: if args.empty? 193: ds.single_record 194: else 195: args = (args.size == 1) ? args.first : args 196: if args.is_a?(Integer) 197: ds.limit(args).all 198: else 199: ds.filter(args).single_record 200: end 201: end 202: end
Calls first. If first returns nil (signaling that no row matches), raise a Sequel::NoMatchingRow exception.
# File lib/sequel/dataset/actions.rb, line 206 206: def first!(*args, &block) 207: first(*args, &block) || raise(Sequel::NoMatchingRow) 208: end
Return the column value for the first matching record in the dataset. Raises an error if both an argument and block is given.
DB[:table].get(:id) # SELECT id FROM table LIMIT 1 # => 3 ds.get{sum(id)} # SELECT sum(id) AS v FROM table LIMIT 1 # => 6
You can pass an array of columns or expressions to return multiple values, but you must make sure each element in the array has an alias that Sequel can determine:
DB[:table].get([:id, :name]) # SELECT id, name FROM table LIMIT 1 # => [3, 'foo'] DB[:table].get{[sum(id).as(sum), name]} # SELECT sum(id) AS sum, name FROM table LIMIT 1 # => [6, 'foo']
# File lib/sequel/dataset/actions.rb, line 228 228: def get(column=(no_arg=true; nil), &block) 229: ds = naked 230: if block 231: raise(Error, ARG_BLOCK_ERROR_MSG) unless no_arg 232: ds = ds.select(&block) 233: column = ds.opts[:select] 234: column = nil if column.is_a?(Array) && column.length < 2 235: else 236: ds = if column.is_a?(Array) 237: ds.select(*column) 238: else 239: ds.select(auto_alias_expression(column)) 240: end 241: end 242: 243: if column.is_a?(Array) 244: if r = ds.single_record 245: r.values_at(*hash_key_symbols(column)) 246: end 247: else 248: ds.single_value 249: end 250: end
Don't allow graphing a dataset that splits multiple statements
# File lib/sequel/adapters/mysql.rb, line 319 319: def graph(*) 320: raise(Error, "Can't graph a dataset that splits multiple result sets") if opts[:split_multiple_result_sets] 321: super 322: end
Inserts multiple records into the associated table. This method can be used to efficiently insert a large number of records into a table in a single query if the database supports it. Inserts are automatically wrapped in a transaction.
This method is called with a columns array and an array of value arrays:
DB[:table].import([:x, :y], [[1, 2], [3, 4]]) # INSERT INTO table (x, y) VALUES (1, 2) # INSERT INTO table (x, y) VALUES (3, 4)
This method also accepts a dataset instead of an array of value arrays:
DB[:table].import([:x, :y], DB[:table2].select(:a, :b)) # INSERT INTO table (x, y) SELECT a, b FROM table2
Options:
:commit_every : | Open a new transaction for every given number of records. For example, if you provide a value of 50, will commit after every 50 records. |
:return : | When set to :primary_key, returns an array of autoincremented primary key values for the rows inserted. |
:server : | Set the server/shard to use for the transaction and insert queries. |
:slice : | Same as :commit_every; :commit_every takes precedence. See the sketch after this list. |
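A sketch of :commit_every, assuming a table with a single x column:
DB[:table].import([:x], (1..100).map{|i| [i]}, :commit_every=>25) # inserts the rows in four batches of 25, each batch in its own transaction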
# File lib/sequel/dataset/actions.rb, line 277 277: def import(columns, values, opts=OPTS) 278: return @db.transaction{insert(columns, values)} if values.is_a?(Dataset) 279: 280: return if values.empty? 281: raise(Error, IMPORT_ERROR_MSG) if columns.empty? 282: ds = opts[:server] ? server(opts[:server]) : self 283: 284: if slice_size = opts.fetch(:commit_every, opts.fetch(:slice, default_import_slice)) 285: offset = 0 286: rows = [] 287: while offset < values.length 288: rows << ds._import(columns, values[offset, slice_size], opts) 289: offset += slice_size 290: end 291: rows.flatten 292: else 293: ds._import(columns, values, opts) 294: end 295: end
Inserts values into the associated table. The returned value is generally the value of the primary key for the inserted row, but that is adapter dependent.
insert handles a number of different argument formats:
no arguments or single empty hash : | Uses DEFAULT VALUES |
single hash : | Most common format, treats keys as columns and values as values |
single array : | Treats entries as values, with no columns |
two arrays : | Treats first array as columns, second array as values |
single Dataset : | Treats as an insert based on a selection from the dataset given, with no columns |
array and dataset : | Treats as an insert based on a selection from the dataset given, with the columns given by the array. |
Examples:
DB[:items].insert # INSERT INTO items DEFAULT VALUES DB[:items].insert({}) # INSERT INTO items DEFAULT VALUES DB[:items].insert([1,2,3]) # INSERT INTO items VALUES (1, 2, 3) DB[:items].insert([:a, :b], [1,2]) # INSERT INTO items (a, b) VALUES (1, 2) DB[:items].insert(:a => 1, :b => 2) # INSERT INTO items (a, b) VALUES (1, 2) DB[:items].insert(DB[:old_items]) # INSERT INTO items SELECT * FROM old_items DB[:items].insert([:a, :b], DB[:old_items]) # INSERT INTO items (a, b) SELECT * FROM old_items
# File lib/sequel/dataset/actions.rb, line 332 332: def insert(*values, &block) 333: sql = insert_sql(*values) 334: if uses_returning?(:insert) 335: returning_fetch_rows(sql, &block) 336: else 337: execute_insert(sql) 338: end 339: end
Returns the interval between minimum and maximum values for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].interval(:id) # SELECT (max(id) - min(id)) FROM table LIMIT 1 # => 6 DB[:table].interval{function(column)} # SELECT (max(function(column)) - min(function(column))) FROM table LIMIT 1 # => 7
# File lib/sequel/dataset/actions.rb, line 348 348: def interval(column=Sequel.virtual_row(&Proc.new)) 349: aggregate_dataset.get{(max(column) - min(column)).as(:interval)} 350: end
Reverses the order and then runs first with the given arguments and block. Note that this will not necessarily give you the last record in the dataset, unless you have an unambiguous order. If there is not currently an order for this dataset, raises an Error.
DB[:table].order(:id).last # SELECT * FROM table ORDER BY id DESC LIMIT 1 # => {:id=>10} DB[:table].order(Sequel.desc(:id)).last(2) # SELECT * FROM table ORDER BY id ASC LIMIT 2 # => [{:id=>1}, {:id=>2}]
# File lib/sequel/dataset/actions.rb, line 362 362: def last(*args, &block) 363: raise(Error, 'No order specified') unless @opts[:order] 364: reverse.first(*args, &block) 365: end
Maps column values for each record in the dataset (if a column name is given), or performs the stock mapping functionality of Enumerable otherwise. Raises an Error if both an argument and block are given.
DB[:table].map(:id) # SELECT * FROM table # => [1, 2, 3, ...] DB[:table].map{|r| r[:id] * 2} # SELECT * FROM table # => [2, 4, 6, ...]
You can also provide an array of column names:
DB[:table].map([:id, :name]) # SELECT * FROM table # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
# File lib/sequel/dataset/actions.rb, line 381 381: def map(column=nil, &block) 382: if column 383: raise(Error, ARG_BLOCK_ERROR_MSG) if block 384: return naked.map(column) if row_proc 385: if column.is_a?(Array) 386: super(){|r| r.values_at(*column)} 387: else 388: super(){|r| r[column]} 389: end 390: else 391: super(&block) 392: end 393: end
Returns the maximum value for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].max(:id) # SELECT max(id) FROM table LIMIT 1 # => 10 DB[:table].max{function(column)} # SELECT max(function(column)) FROM table LIMIT 1 # => 7
# File lib/sequel/dataset/actions.rb, line 402 402: def max(column=Sequel.virtual_row(&Proc.new)) 403: aggregate_dataset.get{max(column).as(:max)} 404: end
Returns the minimum value for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].min(:id) # SELECT min(id) FROM table LIMIT 1 # => 1 DB[:table].min{function(column)} # SELECT min(function(column)) FROM table LIMIT 1 # => 0
# File lib/sequel/dataset/actions.rb, line 413 413: def min(column=Sequel.virtual_row(&Proc.new)) 414: aggregate_dataset.get{min(column).as(:min)} 415: end
This is a front end for import that allows you to submit an array of hashes instead of arrays of columns and values:
DB[:table].multi_insert([{:x => 1}, {:x => 2}]) # INSERT INTO table (x) VALUES (1) # INSERT INTO table (x) VALUES (2)
Be aware that all hashes should have the same keys if you use this calling method, otherwise some columns could be missed or set to null instead of to default values.
This respects the same options as import.
# File lib/sequel/dataset/actions.rb, line 429 429: def multi_insert(hashes, opts=OPTS) 430: return if hashes.empty? 431: columns = hashes.first.keys 432: import(columns, hashes.map{|h| columns.map{|c| h[c]}}, opts) 433: end
Yields each row in the dataset, but internally uses multiple queries as needed to process the entire result set without keeping all rows in the dataset in memory, even if the underlying driver buffers all query results in memory.
Because this uses multiple queries internally, in order to remain consistent, it also uses a transaction internally. Additionally, to work correctly, the dataset must have an unambiguous order. Using an ambiguous order can result in an infinite loop, as well as subtler bugs such as yielding duplicate rows or rows being skipped.
Sequel checks that the datasets using this method have an order, but it cannot ensure that the order is unambiguous.
Options:
:rows_per_fetch : | The number of rows to fetch per query. Defaults to 1000. |
:strategy : | The strategy to use for paging of results. By default this is :offset, for using an approach with a limit and offset for every page. This can be set to :filter, which uses a limit and a filter that excludes rows from previous pages. In order for this strategy to work, you must be selecting the columns you are ordering by, and none of the columns can contain NULLs. Note that some Sequel adapters have optimized implementations that will use cursors or streaming regardless of the :strategy option used. |
:filter_values : | If the :strategy=>:filter option is used, this option should be a proc that accepts the last retrieved row for the previous page and an array of ORDER BY expressions, and returns an array of values relating to those expressions for the last retrieved row. You will need to use this option if your ORDER BY expressions are not simple columns, if they contain qualified identifiers that would be ambiguous unqualified, if they contain any identifiers that are aliased in SELECT, and potentially other cases. |
Examples:
DB[:table].order(:id).paged_each{|row| } # SELECT * FROM table ORDER BY id LIMIT 1000 # SELECT * FROM table ORDER BY id LIMIT 1000 OFFSET 1000 # ... DB[:table].order(:id).paged_each(:rows_per_fetch=>100){|row| } # SELECT * FROM table ORDER BY id LIMIT 100 # SELECT * FROM table ORDER BY id LIMIT 100 OFFSET 100 # ... DB[:table].order(:id).paged_each(:strategy=>:filter){|row| } # SELECT * FROM table ORDER BY id LIMIT 1000 # SELECT * FROM table WHERE id > 1001 ORDER BY id LIMIT 1000 # ... DB[:table].order(:table__id).paged_each(:strategy=>:filter, :filter_values=>proc{|row, exprs| [row[:id]]}){|row| } # SELECT * FROM table ORDER BY id LIMIT 1000 # SELECT * FROM table WHERE id > 1001 ORDER BY id LIMIT 1000 # ...
# File lib/sequel/dataset/actions.rb, line 486 486: def paged_each(opts=OPTS) 487: unless @opts[:order] 488: raise Sequel::Error, "Dataset#paged_each requires the dataset be ordered" 489: end 490: unless block_given? 491: return enum_for(:paged_each, opts) 492: end 493: 494: total_limit = @opts[:limit] 495: offset = @opts[:offset] 496: if server = @opts[:server] 497: opts = opts.merge(:server=>server) 498: end 499: 500: rows_per_fetch = opts[:rows_per_fetch] || 1000 501: strategy = if offset || total_limit 502: :offset 503: else 504: opts[:strategy] || :offset 505: end 506: 507: db.transaction(opts) do 508: case strategy 509: when :filter 510: filter_values = opts[:filter_values] || proc{|row, exprs| exprs.map{|e| row[hash_key_symbol(e)]}} 511: base_ds = ds = limit(rows_per_fetch) 512: while ds 513: last_row = nil 514: ds.each do |row| 515: last_row = row 516: yield row 517: end 518: ds = (base_ds.where(ignore_values_preceding(last_row, &filter_values)) if last_row) 519: end 520: else 521: offset ||= 0 522: num_rows_yielded = rows_per_fetch 523: total_rows = 0 524: 525: while num_rows_yielded == rows_per_fetch && (total_limit.nil? || total_rows < total_limit) 526: if total_limit && total_rows + rows_per_fetch > total_limit 527: rows_per_fetch = total_limit - total_rows 528: end 529: 530: num_rows_yielded = 0 531: limit(rows_per_fetch, offset).each do |row| 532: num_rows_yielded += 1 533: total_rows += 1 if total_limit 534: yield row 535: end 536: 537: offset += rows_per_fetch 538: end 539: end 540: end 541: 542: self 543: end
Prepare the given type of query with the given name and store it in the database. Note that a new native prepared statement is created on each call to this prepared statement.
# File lib/sequel/adapters/sqlite.rb, line 385 385: def prepare(type, name=nil, *values) 386: ps = to_prepared_statement(type, values) 387: ps.extend(PreparedStatementMethods) 388: if name 389: ps.prepared_statement_name = name 390: db.set_prepared_statement(name, ps) 391: end 392: ps 393: end
Returns a Range instance made from the minimum and maximum values for the given column/expression. Uses a virtual row block if no argument is given.
DB[:table].range(:id) # SELECT min(id) AS v1, max(id) AS v2 FROM table LIMIT 1 # => 1..10 DB[:table].range{function(column)} # SELECT min(function(column)) AS v1, max(function(column)) AS v2 FROM table LIMIT 1 # => 0..7
# File lib/sequel/dataset/actions.rb, line 552 552: def range(column=Sequel.virtual_row(&Proc.new)) 553: if r = aggregate_dataset.select{[min(column).as(v1), max(column).as(v2)]}.first 554: (r[:v1]..r[:v2]) 555: end 556: end
Returns a hash with key_column values as keys and value_column values as values. Similar to to_hash, but only selects the columns given.
DB[:table].select_hash(:id, :name) # SELECT id, name FROM table # => {1=>'a', 2=>'b', ...}
You can also provide an array of column names for either the key_column, the value column, or both:
DB[:table].select_hash([:id, :foo], [:name, :bar]) # SELECT id, foo, name, bar FROM table # {[1, 3]=>['a', 'c'], [2, 4]=>['b', 'd'], ...}
When using this method, you must be sure that each expression has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
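A sketch of aliasing an expression so Sequel can determine the key; the lower function and :n alias are illustrative:
DB[:table].select_hash(Sequel.function(:lower, :name).as(:n), :id) # SELECT lower(name) AS n, id FROM table # => {'jim'=>1, 'bob'=>2, ...}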
# File lib/sequel/dataset/actions.rb, line 573 573: def select_hash(key_column, value_column) 574: _select_hash(:to_hash, key_column, value_column) 575: end
Returns a hash with key_column values as keys and an array of value_column values. Similar to to_hash_groups, but only selects the columns given.
DB[:table].select_hash_groups(:name, :id) # SELECT name, id FROM table # => {'a'=>[1, 4, ...], 'b'=>[2, ...], ...}
You can also provide an array of column names for either the key_column, the value column, or both:
DB[:table].select_hash_groups([:first, :middle], [:last, :id]) # SELECT first, middle, last, id FROM table # {['a', 'b']=>[['c', 1], ['d', 2], ...], ...}
When using this method, you must be sure that each expression has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 592 592: def select_hash_groups(key_column, value_column) 593: _select_hash(:to_hash_groups, key_column, value_column) 594: end
Selects the column given (either as an argument or as a block), and returns an array of all values of that column in the dataset. If you give a block argument that returns an array with multiple entries, the contents of the resulting array are undefined. Raises an Error if called with both an argument and a block.
DB[:table].select_map(:id) # SELECT id FROM table # => [3, 5, 8, 1, ...] DB[:table].select_map{id * 2} # SELECT (id * 2) FROM table # => [6, 10, 16, 2, ...]
You can also provide an array of column names:
DB[:table].select_map([:id, :name]) # SELECT id, name FROM table # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
If you provide an array of expressions, you must be sure that each entry in the array has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 616 616: def select_map(column=nil, &block) 617: _select_map(column, false, &block) 618: end
The same as select_map, but in addition orders the array by the column.
DB[:table].select_order_map(:id) # SELECT id FROM table ORDER BY id # => [1, 2, 3, 4, ...] DB[:table].select_order_map{id * 2} # SELECT (id * 2) FROM table ORDER BY (id * 2) # => [2, 4, 6, 8, ...]
You can also provide an array of column names:
DB[:table].select_order_map([:id, :name]) # SELECT id, name FROM table ORDER BY id, name # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]
If you provide an array of expressions, you must be sure that each entry in the array has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.
# File lib/sequel/dataset/actions.rb, line 636 636: def select_order_map(column=nil, &block) 637: _select_map(column, true, &block) 638: end
Makes each yield arrays of rows, with each array containing the rows for a given result set. Does not work with graphing. So you can submit SQL with multiple statements and easily determine which statement returned which results.
Modifies the row_proc of the returned dataset so that it still works as expected (running on the hashes instead of on the arrays of hashes). If you modify the row_proc afterward, note that it will receive an array of hashes instead of a hash.
# File lib/sequel/adapters/mysql.rb, line 333 333: def split_multiple_result_sets 334: raise(Error, "Can't split multiple statements on a graphed dataset") if opts[:graph] 335: ds = clone(:split_multiple_result_sets=>true) 336: ds.row_proc = proc{|x| x.map{|h| row_proc.call(h)}} if row_proc 337: ds 338: end
Returns the sum for the given column/expression. Uses a virtual row block if no column is given.
DB[:table].sum(:id) # SELECT sum(id) FROM table LIMIT 1 # => 55
DB[:table].sum{function(column)} # SELECT sum(function(column)) FROM table LIMIT 1 # => 10
# File lib/sequel/dataset/actions.rb, line 664 664: def sum(column=Sequel.virtual_row(&Proc.new)) 665: aggregate_dataset.get{sum(column).as(:sum)} 666: end
Returns a hash with one column used as key and another used as value. If rows have duplicate values for the key column, the latter row(s) will overwrite the value of the previous row(s). If the value_column is not given or nil, uses the entire hash as the value.
DB[:table].to_hash(:id, :name) # SELECT * FROM table # {1=>'Jim', 2=>'Bob', ...}
DB[:table].to_hash(:id) # SELECT * FROM table # {1=>{:id=>1, :name=>'Jim'}, 2=>{:id=>2, :name=>'Bob'}, ...}
You can also provide an array of column names for either the key_column, the value_column, or both:
DB[:table].to_hash([:id, :foo], [:name, :bar]) # SELECT * FROM table # {[1, 3]=>['Jim', 'bo'], [2, 4]=>['Bob', 'be'], ...}
DB[:table].to_hash([:id, :name]) # SELECT * FROM table # {[1, 'Jim']=>{:id=>1, :name=>'Jim'}, [2, 'Bob']=>{:id=>2, :name=>'Bob'}, ...}
# File lib/sequel/dataset/actions.rb, line 687
687: def to_hash(key_column, value_column = nil)
688:   h = {}
689:   if value_column
690:     return naked.to_hash(key_column, value_column) if row_proc
691:     if value_column.is_a?(Array)
692:       if key_column.is_a?(Array)
693:         each{|r| h[r.values_at(*key_column)] = r.values_at(*value_column)}
694:       else
695:         each{|r| h[r[key_column]] = r.values_at(*value_column)}
696:       end
697:     else
698:       if key_column.is_a?(Array)
699:         each{|r| h[r.values_at(*key_column)] = r[value_column]}
700:       else
701:         each{|r| h[r[key_column]] = r[value_column]}
702:       end
703:     end
704:   elsif key_column.is_a?(Array)
705:     each{|r| h[r.values_at(*key_column)] = r}
706:   else
707:     each{|r| h[r[key_column]] = r}
708:   end
709:   h
710: end
Returns a hash with one column used as key and the values being an array of column values. If the value_column is not given or nil, uses the entire hash as the value.
DB[:table].to_hash_groups(:name, :id) # SELECT * FROM table # {'Jim'=>[1, 4, 16, ...], 'Bob'=>[2], ...}
DB[:table].to_hash_groups(:name) # SELECT * FROM table # {'Jim'=>[{:id=>1, :name=>'Jim'}, {:id=>4, :name=>'Jim'}, ...], 'Bob'=>[{:id=>2, :name=>'Bob'}], ...}
You can also provide an array of column names for either the key_column, the value_column, or both:
DB[:table].to_hash_groups([:first, :middle], [:last, :id]) # SELECT * FROM table # {['Jim', 'Bob']=>[['Smith', 1], ['Jackson', 4], ...], ...}
DB[:table].to_hash_groups([:first, :middle]) # SELECT * FROM table # {['Jim', 'Bob']=>[{:id=>1, :first=>'Jim', :middle=>'Bob', :last=>'Smith'}, ...], ...}
# File lib/sequel/dataset/actions.rb, line 730
730: def to_hash_groups(key_column, value_column = nil)
731:   h = {}
732:   if value_column
733:     return naked.to_hash_groups(key_column, value_column) if row_proc
734:     if value_column.is_a?(Array)
735:       if key_column.is_a?(Array)
736:         each{|r| (h[r.values_at(*key_column)] ||= []) << r.values_at(*value_column)}
737:       else
738:         each{|r| (h[r[key_column]] ||= []) << r.values_at(*value_column)}
739:       end
740:     else
741:       if key_column.is_a?(Array)
742:         each{|r| (h[r.values_at(*key_column)] ||= []) << r[value_column]}
743:       else
744:         each{|r| (h[r[key_column]] ||= []) << r[value_column]}
745:       end
746:     end
747:   elsif key_column.is_a?(Array)
748:     each{|r| (h[r.values_at(*key_column)] ||= []) << r}
749:   else
750:     each{|r| (h[r[key_column]] ||= []) << r}
751:   end
752:   h
753: end
Truncates the dataset. Returns nil.
DB[:table].truncate # TRUNCATE table # => nil
# File lib/sequel/dataset/actions.rb, line 759 759: def truncate 760: execute_ddl(truncate_sql) 761: end
Updates values for the dataset. The returned value is generally the number of rows updated, but that is adapter dependent. values should be a hash where the keys are columns to set and values are the values to which to set the columns.
DB[:table].update(:x=>nil) # UPDATE table SET x = NULL # => 10
DB[:table].update(:x=>Sequel.expr(:x)+1, :y=>0) # UPDATE table SET x = (x + 1), y = 0 # => 10
# File lib/sequel/dataset/actions.rb, line 773
773: def update(values=OPTS, &block)
774:   sql = update_sql(values)
775:   if uses_returning?(:update)
776:     returning_fetch_rows(sql, &block)
777:   else
778:     execute_dui(sql)
779:   end
780: end
Execute the given SQL and return the number of rows deleted. This exists solely as an optimization, replacing with_sql(sql).delete. It's significantly faster as it does not require cloning the current dataset.
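A hypothetical usage sketch:
DB[:table].with_sql_delete("DELETE FROM table WHERE id > 100") # DELETE FROM table WHERE id > 100 # => 3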
# File lib/sequel/dataset/actions.rb, line 791 791: def with_sql_delete(sql) 792: execute_dui(sql) 793: end
Run the given SQL and yield each returned row to the block.
This method should not be called on a shared dataset if the columns selected in the given SQL do not match the columns in the receiver.
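A hypothetical usage sketch:
DB[:table].with_sql_each("SELECT * FROM table"){|row| puts row[:name]} # SELECT * FROM table # prints the name for each row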
# File lib/sequel/dataset/actions.rb, line 800
800: def with_sql_each(sql)
801:   if row_proc = @row_proc
802:     fetch_rows(sql){|r| yield row_proc.call(r)}
803:   else
804:     fetch_rows(sql){|r| yield r}
805:   end
806:   self
807: end
Run the given SQL and return the first value in the first row, or nil if no rows were returned. For this to make sense, the SQL given should select only a single value. See with_sql_each.
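A hypothetical usage sketch:
DB[:table].with_sql_single_value("SELECT count(*) FROM table") # SELECT count(*) FROM table # => 42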
# File lib/sequel/dataset/actions.rb, line 819
819: def with_sql_single_value(sql)
820:   if r = with_sql_first(sql)
821:     r.values.first
822:   end
823: end
Internals of import. If primary key values are requested, use separate insert commands for each row. Otherwise, call multi_insert_sql and execute each statement it gives separately.
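For context, a hypothetical sketch of the public import calls that reach this code (the :return option shown is an assumption about this version):
DB[:table].import([:x, :y], [[1, 2], [3, 4]]) # executes the statements from multi_insert_sql inside a transaction
DB[:table].import([:x, :y], [[1, 2], [3, 4]], :return=>:primary_key) # issues a separate INSERT per row and returns the primary key values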
# File lib/sequel/dataset/actions.rb, line 836
836: def _import(columns, values, opts)
837:   trans_opts = opts.merge(:server=>@opts[:server])
838:   if opts[:return] == :primary_key
839:     @db.transaction(trans_opts){values.map{|v| insert(columns, v)}}
840:   else
841:     stmts = multi_insert_sql(columns, values)
842:     @db.transaction(trans_opts){stmts.each{|st| execute_dui(st)}}
843:   end
844: end
Return an array of arrays of values given by the symbols in ret_cols.
# File lib/sequel/dataset/actions.rb, line 847 847: def _select_map_multiple(ret_cols) 848: map{|r| r.values_at(*ret_cols)} 849: end
These are methods you can call to see what SQL will be generated by the dataset.