class Sequel::Postgres::Dataset
Constants
- BindArgumentMethods
- PREPARED_ARG_PLACEHOLDER
-
:nocov:
- PreparedStatementMethods
Public Instance Methods
Source
# File lib/sequel/adapters/postgres.rb 739 def bound_variable_modules 740 [BindArgumentMethods] 741 end
Source
# File lib/sequel/adapters/postgres.rb, lines 650-653
# Execute the given SQL and yield each result row as a hash with symbol
# keys and typecast values.  When the dataset has the :cursor option set,
# fetching is delegated to cursor_fetch_rows so rows are retrieved in
# batches via a server-side cursor.
def fetch_rows(sql)
  if @opts[:cursor]
    cursor_fetch_rows(sql){|h| yield h}
  else
    execute(sql) do |res|
      yield_hash_rows(res, fetch_rows_set_cols(res)){|h| yield h}
    end
  end
end
Source
# File lib/sequel/adapters/postgres.rb, lines 656-661
# Use a cursor for paging through the dataset.  Returns an Enumerator if
# no block is given; otherwise yields each row to the block via use_cursor.
def paged_each(opts=OPTS, &block)
  return enum_for(:paged_each, opts) unless defined?(yield)
  use_cursor(opts).each(&block)
end
Use a cursor for paging.
Source
# File lib/sequel/adapters/postgres.rb 749 def prepared_arg_placeholder 750 PREPARED_ARG_PLACEHOLDER 751 end
PostgreSQL uses $N for placeholders instead of ?, so use a $ as the placeholder.
Source
# File lib/sequel/adapters/postgres.rb 743 def prepared_statement_modules 744 [PreparedStatementMethods] 745 end
Source
# File lib/sequel/adapters/postgres.rb, lines 686-688
# Return a clone of the dataset that fetches records using a server-side
# cursor instead of loading the entire result set at once.  The given
# options are merged over a default of 1000 rows per fetch and stored
# under the :cursor option, which fetch_rows checks for.
def use_cursor(opts=OPTS)
  cursor_opts = {:rows_per_fetch=>1000}.merge!(opts)
  clone(:cursor=>cursor_opts)
end
Uses a cursor for fetching records, instead of fetching the entire result set at once. Note this uses a transaction around the cursor usage by default and can be changed using ‘hold: true` as described below. Cursors can be used to process large datasets without holding all rows in memory (which is what the underlying drivers may do by default). Options:
- :cursor_name
-
The name assigned to the cursor (default `sequel_cursor`). Nested cursors require different names.
- :hold
-
Declare the cursor WITH HOLD and don't use a transaction around the cursor usage.
- :rows_per_fetch
-
The number of rows per fetch (default 1000). Higher numbers result in fewer queries but greater memory use.
- :skip_transaction
-
Same as :hold, but :hold takes priority.
Usage:
DB[:huge_table].use_cursor.each{|row| p row}
DB[:huge_table].use_cursor(rows_per_fetch: 10000).each{|row| p row}
DB[:huge_table].use_cursor(cursor_name: 'my_cursor').each{|row| p row}
This is untested with the prepared statement/bound variable support, and unlikely to work with either.
Source
# File lib/sequel/adapters/postgres.rb, lines 698-700
# Return a clone whose WHERE clause is replaced by CURRENT OF with the
# given cursor name, allowing row-by-row updates/deletes while iterating
# a dataset via a cursor.  The cursor name is quoted as an identifier.
def where_current_of(cursor_name='sequel_cursor')
  current_of = Sequel.lit(['CURRENT OF '], Sequel.identifier(cursor_name))
  clone(:where=>current_of)
end
Replace the WHERE clause with one that uses CURRENT OF with the given cursor name (or the default cursor name). This allows you to update a large dataset by updating individual rows while processing the dataset via a cursor:
DB[:huge_table].use_cursor(rows_per_fetch: 1).each do |row|
  DB[:huge_table].where_current_of.update(column: ruby_method(row))
end
Private Instance Methods
Source
# File lib/sequel/adapters/postgres.rb, lines 757-765
# Generate and execute a CALL statement for the named stored procedure
# with the given arguments, returning the first row of any result.
def call_procedure(name, args)
  # Seed a mutable string with the statement keyword, then append the
  # quoted procedure name and the literalized argument list.
  sql = String.new("CALL ")
  identifier_append(sql, name)
  sql << "("
  expression_list_append(sql, args)
  sql << ")"
  with_sql_first(sql)
end
Generate and execute a procedure call.
Source
# File lib/sequel/adapters/postgres.rb 768 def cursor_fetch_rows(sql) 769 cursor = @opts[:cursor] 770 hold = cursor.fetch(:hold){cursor[:skip_transaction]} 771 server_opts = {:server=>@opts[:server] || :read_only, :skip_transaction=>hold} 772 cursor_name = quote_identifier(cursor[:cursor_name] || 'sequel_cursor') 773 rows_per_fetch = cursor[:rows_per_fetch].to_i 774 775 db.transaction(server_opts) do 776 begin 777 execute_ddl("DECLARE #{cursor_name} NO SCROLL CURSOR WITH#{'OUT' unless hold} HOLD FOR #{sql}", server_opts) 778 rows_per_fetch = 1000 if rows_per_fetch <= 0 779 fetch_sql = "FETCH FORWARD #{rows_per_fetch} FROM #{cursor_name}" 780 cols = nil 781 # Load columns only in the first fetch, so subsequent fetches are faster 782 execute(fetch_sql) do |res| 783 cols = fetch_rows_set_cols(res) 784 yield_hash_rows(res, cols){|h| yield h} 785 return if res.ntuples < rows_per_fetch 786 end 787 while true 788 execute(fetch_sql) do |res| 789 yield_hash_rows(res, cols){|h| yield h} 790 return if res.ntuples < rows_per_fetch 791 end 792 end 793 rescue Exception => e 794 raise 795 ensure 796 begin 797 execute_ddl("CLOSE #{cursor_name}", server_opts) 798 rescue 799 raise e if e 800 raise 801 end 802 end 803 end 804 end
Use a cursor to fetch groups of records at a time, yielding them to the block.
Source
# File lib/sequel/adapters/postgres.rb, lines 808-816
# Set the dataset's columns based on the result set, and return an array
# of [type conversion proc, column name symbol] pairs, one per field.
def fetch_rows_set_cols(res)
  procs = db.conversion_procs
  cols = res.nfields.times.map do |fieldnum|
    [procs[res.ftype(fieldnum)], output_identifier(res.fname(fieldnum))]
  end
  self.columns = cols.map{|c| c[1]}
  cols
end
Set the columns based on the result set, and return the array of [type conversion proc, column name symbol] pairs, one per field.
Source
# File lib/sequel/adapters/postgres.rb, lines 819-821
# Append a literalized blob value to the SQL string, using the driver's
# escape_bytea on a connection checked out for the dataset's server.
def literal_blob_append(sql, v)
  escaped = db.synchronize(@opts[:server]){|c| c.escape_bytea(v)}
  sql << "'" << escaped << "'"
end
Use the driver’s escape_bytea
Source
# File lib/sequel/adapters/postgres.rb, lines 824-826
# Append a literalized string value to the SQL string, using the driver's
# escape_string on a connection checked out for the dataset's server.
def literal_string_append(sql, v)
  escaped = db.synchronize(@opts[:server]){|c| c.escape_string(v)}
  sql << "'" << escaped << "'"
end
Use the driver’s escape_string
Source
# File lib/sequel/adapters/postgres.rb 830 def yield_hash_rows(res, cols) 831 ntuples = res.ntuples 832 recnum = 0 833 while recnum < ntuples 834 fieldnum = 0 835 nfields = cols.length 836 converted_rec = {} 837 while fieldnum < nfields 838 type_proc, fieldsym = cols[fieldnum] 839 value = res.getvalue(recnum, fieldnum) 840 converted_rec[fieldsym] = (value && type_proc) ? type_proc.call(value) : value 841 fieldnum += 1 842 end 843 yield converted_rec 844 recnum += 1 845 end 846 end
For each row in the result set, yield a hash with column name symbol keys and typecasted values.