X-Git-Url: http://git.hungrycats.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=driver%2Fxscreensaver-getimage-file;h=7b45988de5f0e49eb97261b4c002416c570ac9c2;hb=4ade52359b6eba3621566dac79793a33aa4c915f;hp=6dd262e8ccbdbadf3726def033f7876401ab4dd3;hpb=f0261d8acab611f3433160e4f07367b870439739;p=xscreensaver

diff --git a/driver/xscreensaver-getimage-file b/driver/xscreensaver-getimage-file
index 6dd262e8..7b45988d 100755
--- a/driver/xscreensaver-getimage-file
+++ b/driver/xscreensaver-getimage-file
@@ -1,5 +1,5 @@
 #!/usr/bin/perl -w
-# Copyright © 2001-2009 Jamie Zawinski <jwz@jwz.org>.
+# Copyright © 2001-2013 Jamie Zawinski <jwz@jwz.org>.
 #
 # Permission to use, copy, modify, distribute, and sell this software and its
 # documentation for any purpose is hereby granted without fee, provided that
@@ -13,12 +13,18 @@
 # prints its name.  The file will be an image file whose dimensions are
 # larger than a certain minimum size.
 #
+# If the directory is a URL, it is assumed to be an RSS or Atom feed.
+# The images from that feed will be downloaded, cached, and selected from
+# at random.  The feed will be re-polled periodically, as needed.
+#
 # The various xscreensaver hacks that manipulate images ("jigsaw", etc.) get
 # the image to manipulate by running the "xscreensaver-getimage" program.
 #
 # Under X11, the "xscreensaver-getimage" program invokes this script,
 # depending on the value of the "chooseRandomImages" and "imageDirectory"
 # settings in the ~/.xscreensaver file (or .../app-defaults/XScreenSaver).
+# The screen savers invoke "xscreensaver-getimage" via utils/grabclient.c,
+# which then invokes this script.
 #
 # Under Cocoa, this script lives inside the .saver bundle, and is invoked
 # directly from utils/grabclient.c.
@@ -43,8 +49,15 @@
 use bytes;  # Larry can take Unicode and shove it up his ass sideways.
             # Perl 5.8.0 causes us to start getting incomprehensible
             # errors about UTF-8 all over the place without this.
 
+use Digest::MD5 qw(md5_base64);
+
+# Some Linux systems don't install LWP by default!
+# Only error out if we're actually loading a URL instead of local data.
+BEGIN { eval 'use LWP::Simple;' }
+
+
 my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.27 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $version = q{ $Revision: 1.35 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
 
 my $verbose = 0;
@@ -64,6 +77,10 @@ my $cache_p = 1;
 #
 my $cache_max_age = 60 * 60 * 3;   # 3 hours
 
+# Re-poll RSS/Atom feeds when local copy is older than this many seconds.
+#
+my $feed_max_age = $cache_max_age;
+
 
 # This matches files that we are allowed to use as images (case-insensitive.)
 # Anything not matching this is ignored.  This is so you can point your
@@ -104,18 +121,19 @@
 my $stat_count = 0;	    # number of files/dirs stat'ed
 my $skip_count_unstat = 0;  # number of files skipped without stat'ing
 my $skip_count_stat = 0;    # number of files skipped after stat
 
-sub find_all_files {
+sub find_all_files($);
+sub find_all_files($) {
   my ($dir) = @_;
 
   print STDERR "$progname: + reading dir $dir/...\n" if ($verbose > 1);
 
-  local *DIR;
-  if (! opendir (DIR, $dir)) {
+  my $dd;
+  if (! opendir ($dd, $dir)) {
     print STDERR "$progname: couldn't open $dir: $!\n" if ($verbose);
     return;
   }
-  my @files = readdir (DIR);
-  closedir (DIR);
+  my @files = readdir ($dd);
+  closedir ($dd);
 
   my @dirs = ();
 
@@ -190,7 +208,7 @@
 }
 
 
-sub spotlight_all_files {
+sub spotlight_all_files($) {
   my ($dir) = @_;
 
   my @terms = ();
@@ -220,7 +238,7 @@
 # running at once, one will wait for the other, instead of both of
 # them spanking the same file system at the same time.
 #
-local *CACHE_FILE;
+my $cache_fd = undef;
 my $cache_file_name = undef;
 my $read_cache_p = 0;
 
@@ -242,18 +260,18 @@
     if ($verbose > 1);
 
   my $file = $cache_file_name;
-  open (CACHE_FILE, "+>>$file") || error ("unable to write $file: $!");
-  flock (CACHE_FILE, LOCK_EX)   || error ("unable to lock $file: $!");
-  seek (CACHE_FILE, 0, 0)       || error ("unable to rewind $file: $!");
+  open ($cache_fd, '+>>', $file) || error ("unable to write $file: $!");
+  flock ($cache_fd, LOCK_EX)     || error ("unable to lock $file: $!");
+  seek ($cache_fd, 0, 0)         || error ("unable to rewind $file: $!");
 
-  my $mtime = (stat(CACHE_FILE))[9];
+  my $mtime = (stat($cache_fd))[9];
 
   if ($mtime + $cache_max_age < time) {
     print STDERR "$progname: cache is too old\n" if ($verbose);
     return ();
   }
 
-  my $odir = <CACHE_FILE>;
+  my $odir = <$cache_fd>;
   $odir =~ s/[\r\n]+$//s if defined ($odir);
   if (!defined ($odir) || ($dir ne $odir)) {
     print STDERR "$progname: cache is for $odir, not $dir\n"
@@ -262,7 +280,7 @@
   }
 
   my @files = ();
-  while (<CACHE_FILE>) {
+  while (<$cache_fd>) {
     s/[\r\n]+$//s;
     push @files, "$odir/$_";
   }
@@ -285,18 +303,17 @@
 
   if (! $read_cache_p) {
-    truncate (CACHE_FILE, 0) ||
+    truncate ($cache_fd, 0) ||
       error ("unable to truncate $cache_file_name: $!");
-    seek (CACHE_FILE, 0, 0) ||
+    seek ($cache_fd, 0, 0) ||
       error ("unable to rewind $cache_file_name: $!");
 
     if ($#all_files >= 0) {
-      print CACHE_FILE "$dir\n";
-      my $re = qr/$dir/;
+      print $cache_fd "$dir\n";
       foreach (@all_files) {
         my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
-        $f =~ s@^$re/@@so || die;
-        print CACHE_FILE "$f\n";
+        $f =~ s@^\Q$dir\L/@@so || die;  # remove $dir from front
+        print $cache_fd "$f\n";
       }
     }
 
@@ -304,17 +321,422 @@
       if ($verbose);
   }
 
-  flock (CACHE_FILE, LOCK_UN) ||
+  flock ($cache_fd, LOCK_UN) ||
     error ("unable to unlock $cache_file_name: $!");
-  close (CACHE_FILE);
+  close ($cache_fd);
+  $cache_fd = undef;
+}
+
+
+sub html_unquote($) {
+  my ($h) = @_;
+
+  # This only needs to handle entities that occur in RSS, not full HTML.
+  my %ent = ( 'amp' => '&', 'lt' => '<', 'gt' => '>',
+              'quot' => '"', 'apos' => "'" );
+  $h =~ s/(&(\#)?([[:alpha:]\d]+);?)/
+    {
+     my ($o, $c) = ($1, $3);
+     if (! defined($2)) {
+       $c = $ent{$c};			# for &lt;
+     } else {
+       if ($c =~ m@^x([\dA-F]+)$@si) {	# for &#x41;
+         $c = chr(hex($1));
+       } elsif ($c =~ m@^\d+$@si) {	# for &#65;
+         $c = chr($c);
+       } else {
+         $c = undef;
+       }
+     }
+     ($c || $o);
+    }
+   /gexi;
+  return $h;
+}
+
+
+
+sub set_proxy($) {
+  my ($ua) = @_;
+
+  if (!defined($ENV{http_proxy}) && !defined($ENV{HTTP_PROXY})) {
+    my $proxy_data = `scutil --proxy 2>/dev/null`;
+    my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+    my ($port)   = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+    if ($server) {
+      # Note: this ignores the "ExceptionsList".
+      $ENV{http_proxy} = "http://" . $server . ($port ? ":$port" : "") . "/";
"/"; + print STDERR "$progname: MacOS proxy: $ENV{http_proxy}\n" + if ($verbose > 2) + } + } + + $ua->env_proxy(); +} + + +sub init_lwp() { + if (! defined ($LWP::Simple::ua)) { + error ("\n\n\tPerl is broken. Do this to repair it:\n" . + "\n\tsudo cpan LWP::Simple\n"); + } + set_proxy ($LWP::Simple::ua); +} + + +# Returns a list of the image enclosures in the RSS or Atom feed. +# Elements of the list are references, [ "url", "guid" ]. +# +sub parse_feed($); +sub parse_feed($) { + my ($url) = @_; + + init_lwp(); + $LWP::Simple::ua->agent ("$progname/$version"); + $LWP::Simple::ua->timeout (10); # bail sooner than the default of 3 minutes + + my $body = (LWP::Simple::get($url) || ''); + + if ($body !~ m@^<\?xml\s@si) { + # Not an RSS/Atom feed. Try RSS autodiscovery. + + error ("not an RSS or Atom feed, or HTML: $url") + unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si); + + # Find the first with RSS or Atom in it, and use that instead. + + $body =~ s@]*)>@{ + my $p = $1; + if ($p =~ m! \b REL \s* = \s* ['"]? alternate \b!six && + $p =~ m! \b TYPE \s* = \s* ['"]? application/(atom|rss) !six && + $p =~ m! \b HREF \s* = \s* ['"] ( [^<>'"]+ ) !six + ) { + my $u2 = html_unquote ($1); + print STDERR "$progname: found feed: $u2\n" + if ($verbose); + return parse_feed ($u2); + } + ''; + }@gsexi; + + error ("no RSS or Atom feed for HTML page: $url"); + } + + + $body =~ s@( + # + if (! $iurl) { + $item =~ s!(]*>)!{ + my $link = $1; + my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si); + my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si); + my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si); + + if ($rel && lc($rel) eq 'enclosure') { + if ($type) { + $href = undef unless ($type =~ m@^image/@si); # omit videos + } + $iurl = html_unquote($href) if $href; + } + $link; + }!gsexi; + } + + # Then look for + # + if (! $iurl) { + $item =~ s!(]*>)!{ + my $link = $1; + my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si); + $iurl = html_unquote($href) if $href; + $link; + }!gsexi; + } + + # Then look for + # + if (! $iurl) { + $item =~ s!(]*>)!{ + my $link = $1; + my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si); + my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si); + $iurl = html_unquote($href) + if ($href && $type && $type =~ m@^image/@si); # omit videos + $link; + }!gsexi; + } + + # Ok, maybe there's an image in the field? + # + if (! $iurl) { + $item =~ s!((]*>)([^<>]*))!{ + my ($all, $u2) = ($1, $3); + $iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io); + $all; + }!gsexi; + } + + # Then look for ... with an inside. + # + if (! $iurl) { + $item =~ s!(]*>.*?)!{ + my $desc = $1; + $desc = html_unquote($desc); + my ($href) = ($desc =~ m@]*\bSRC=[\"\']?([^\"\'<>]+)@si); + $iurl = $href if ($href); + $desc; + }!gsexi; + } + + # Could also do , but the above probably covers all + # of the real-world possibilities. + + + # Find a unique ID for this image, to defeat image farms. + # First look for ... + ($id) = ($item =~ m!]*>\s*([^<>]+?)\s*!si) unless $id; + + # Then look for ... + ($id) = ($item =~ m!]*>\s*([^<>]+?)\s*!si) unless $id; + + # Then look for ... + ($id) = ($item =~ m!]*>\s*([^<>]+?)\s*!si) unless $id; + + + if ($iurl) { + $id = $iurl unless $id; + my $o = $ids{$id}; + if (! $o) { + $ids{$id} = $iurl; + my @P = ($iurl, $id); + push @imgs, \@P; + } elsif ($iurl ne $o) { + print STDERR "$progname: WARNING: dup ID \"$id\"" . 
+                     " for \"$o\" and \"$iurl\"\n";
+      }
+    }
+  }
+
+  return @imgs;
+}
+
+
+# Like md5_base64 but uses filename-safe characters.
+#
+sub md5_file($) {
+  my ($s) = @_;
+  $s = md5_base64($s);
+  $s =~ s@[/]@_@gs;
+  $s =~ s@[+]@-@gs;
+  return $s;
+}
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+sub download_image($$$) {
+  my ($url, $uid, $dir) = @_;
+
+  my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+  my $file = md5_file ($uid);
+  $file .= '.' . lc($ext) if $ext;
+
+  # Don't bother doing If-Modified-Since to see if the URL has changed.
+  # If we have already downloaded it, assume it's good.
+  if (-f "$dir/$file") {
+    print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+      if ($verbose > 1);
+    return $file;
+  }
+
+  # Special-case kludge for Flickr:
+  # Their RSS feeds sometimes include only the small versions of the images.
+  # So if the URL ends in "s" (75x75), "t" (100x100) or "m" (240x240), then
+  # munge it to be "b" (1024x1024).
+  #
+  $url =~ s@_[stm](\.[a-z]+)$@_b$1@si
+    if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+
+  print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+    if ($verbose > 1);
+  init_lwp();
+  $LWP::Simple::ua->agent ("$progname/$version");
+  my $status = LWP::Simple::mirror ($url, "$dir/$file");
+  if (!LWP::Simple::is_success ($status)) {
+    print STDERR "$progname: error $status: $url\n";	# keep going
+  }
+
+  return $file;
+}
+
+
+sub mirror_feed($) {
+  my ($url) = @_;
+
+  if ($url !~ m/^https?:/si) {   # not a URL: local directory.
+    return (undef, $url);
+  }
+
+  my $dir = "$ENV{HOME}/Library/Caches";    # MacOS location
+  if (-d $dir) {
+    $dir = "$dir/org.jwz.xscreensaver.feeds";
+  } elsif (-d "$ENV{HOME}/tmp") {
+    $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+  } else {
+    $dir = "$ENV{HOME}/.xscreensaver-feeds";
+  }
+
+  if (! -d $dir) {
+    mkdir ($dir) || error ("mkdir $dir: $!");
+    print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+  }
+
+  # MD5 for directory name to use for cache of a feed URL.
+  $dir .= '/' . md5_file ($url);
+
+  if (! -d $dir) {
+    mkdir ($dir) || error ("mkdir $dir: $!");
+    print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+  }
+
+  # At this point, we have the directory corresponding to this URL.
+  # Now check to see if the files in it are up to date, and download
+  # them if not.
+
+  my $stamp = '.timestamp';
+  my $lock = "$dir/$stamp";
+
+  print STDERR "$progname: awaiting lock: $lock\n"
+    if ($verbose > 1);
+
+  my $mtime = ((stat($lock))[9]) || 0;
+
+  my $lock_fd;
+  open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+  flock ($lock_fd, LOCK_EX) || error ("unable to lock $lock: $!");
+  seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+
+  my $poll_p = ($mtime + $feed_max_age < time);
+
+  $poll_p = 1 unless ($cache_p);  # poll again now with --no-cache cmd line arg.
+
+  # Even if the cache is young, let's make sure there are at least
+  # a few files in it, and re-check if not.
+  #
+  if (! $poll_p) {
+    my $count = 0;
+    opendir (my $dirh, $dir) || error ("$dir: $!");
+    foreach my $f (readdir ($dirh)) {
+      next if ($f =~ m/^\./s);
+      $count++;
+      last;
+    }
+    closedir $dirh;
+
+    if ($count <= 0) {
+      print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+      $poll_p = 1;
+    }
+  }
+
+  if ($poll_p) {
+
+    print STDERR "$progname: loading $url\n" if ($verbose);
+
+    my %files;
+    opendir (my $dirh, $dir) || error ("$dir: $!");
+    foreach my $f (readdir ($dirh)) {
+      next if ($f eq '.' || $f eq '..');
+      $files{$f} = 0;   # 0 means "file exists, should be deleted"
+    }
+    closedir $dirh;
+
+    $files{$stamp} = 1;
+
+    # Download each image currently in the feed.
+    #
+    my $count = 0;
+    my @urls = parse_feed ($url);
+    print STDERR "$progname: " . ($#urls + 1) . " images\n"
+      if ($verbose > 1);
+    foreach my $p (@urls) {
+      my ($furl, $id) = @$p;
+      my $f = download_image ($furl, $id, $dir);
+      next unless $f;
+      $files{$f} = 1;    # Got it, don't delete
+      $count++;
+    }
+
+    print STDERR "$progname: empty feed: $url\n" if ($count <= 0);
+
+    # Now delete any files that are no longer in the feed.
+    # But if there was nothing in the feed (network failure?)
+    # then don't blow away the old files.
+    #
+    my $kept = 0;
+    foreach my $f (keys(%files)) {
+      if ($count <= 0) {
+        $kept++;
+      } elsif ($files{$f}) {
+        $kept++;
+      } else {
+        if (unlink ("$dir/$f")) {
+          print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+        } else {
+          print STDERR "$progname: rm $dir/$f: $!\n";  # don't bail
+        }
+      }
+    }
+
+    # Both feed and cache are empty.  No files at all.
+    error ("empty feed: $url") if ($kept <= 1);
+
+    $mtime = time();	# update the timestamp
+
+  } else {
+
+    # Not yet time to re-check the URL.
+    print STDERR "$progname: using cache: $url\n" if ($verbose);
+
+  }
+
+  # Unlock and update the write date on the .timestamp file.
+  #
+  truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+  seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+  utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+  flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+  close ($lock_fd);
+  $lock_fd = undef;
+  print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+
+  # Don't bother using the imageDirectory cache.  We know that this directory
+  # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+  # like ~/Pictures/ might.
+  #
+  $cache_p = 0;
+
+  # Return the URL and directory name of the files of that URL's local cache.
+  #
+  return ($url, $dir);
+}
 
 
 sub find_random_file($) {
   my ($dir) = @_;
 
-  $dir =~ s@/+$@@g;
-
   if ($use_spotlight_p == -1) {
     $use_spotlight_p = 0;
     if (-x '/usr/bin/mdfind') {
@@ -322,6 +744,14 @@
     }
   }
 
+  my $url;
+  ($url, $dir) = mirror_feed ($dir);
+
+  if ($url) {
+    $use_spotlight_p = 0;
+    print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+  }
+
   @all_files = read_cache ($dir);
 
   if ($#all_files >= 0) {
@@ -349,7 +779,7 @@
 
   write_cache ($dir);
 
-  @all_files = sort(@all_files);
+#  @all_files = sort(@all_files);
 
   if ($#all_files < 0) {
     print STDERR "$progname: no files in $dir\n";
@@ -362,22 +792,34 @@
     my $n = int (rand ($#all_files + 1));
     my $file = $all_files[$n];
     if (large_enough_p ($file)) {
+      if (! $url) {
+        $file =~ s@^\Q$dir\L/@@so || die;  # remove $dir from front
+      }
       return $file;
     }
   }
 
   print STDERR "$progname: no suitable images in $dir " .
               "(after $max_tries tries)\n";
+
+  # If we got here, blow away the cache.  Maybe it's stale.
+  unlink $cache_file_name if $cache_file_name;
+
   exit 1;
 }
 
 
-sub large_enough_p {
+sub large_enough_p($) {
   my ($file) = @_;
 
   my ($w, $h) = image_file_size ($file);
 
   if (!defined ($h)) {
+
+    # Nonexistent files are obviously too small!
+    # Already printed $verbose message about the file not existing.
+ return 0 unless -f $file; + print STDERR "$progname: $file: unable to determine image size\n" if ($verbose); # Assume that unknown files are of good sizes: this will happen if @@ -399,7 +841,7 @@ sub large_enough_p { # Given the raw body of a GIF document, returns the dimensions of the image. # -sub gif_size { +sub gif_size($) { my ($body) = @_; my $type = substr($body, 0, 6); my $s; @@ -411,7 +853,7 @@ sub gif_size { # Given the raw body of a JPEG document, returns the dimensions of the image. # -sub jpeg_size { +sub jpeg_size($) { my ($body) = @_; my $i = 0; my $L = length($body); @@ -462,7 +904,7 @@ sub jpeg_size { # Given the raw body of a PNG document, returns the dimensions of the image. # -sub png_size { +sub png_size($) { my ($body) = @_; return () unless ($body =~ m/^\211PNG\r/s); my ($bits) = ($body =~ m/^.{12}(.{12})/s); @@ -476,7 +918,7 @@ sub png_size { # Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions # of the image. # -sub image_size { +sub image_size($) { my ($body) = @_; return () if (length($body) < 10); my ($w, $h) = gif_size ($body); @@ -489,17 +931,17 @@ sub image_size { # Returns the dimensions of the image file. # -sub image_file_size { +sub image_file_size($) { my ($file) = @_; - local *IN; - if (! open (IN, "<$file")) { + my $in; + if (! open ($in, '<', $file)) { print STDERR "$progname: $file: $!\n" if ($verbose); - return undef; + return (); } - binmode (IN); # Larry can take Unicode and shove it up his ass sideways. + binmode ($in); # Larry can take Unicode and shove it up his ass sideways. my $body = ''; - sysread (IN, $body, 1024 * 50); # The first 50k should be enough. - close IN; # (It's not for certain huge jpegs... + sysread ($in, $body, 1024 * 50); # The first 50k should be enough. + close $in; # (It's not for certain huge jpegs... return image_size ($body); # but we know they're huge!) } @@ -510,15 +952,19 @@ sub error($) { exit 1; } -sub usage { +sub usage() { print STDERR "usage: $progname [--verbose] directory\n" . " Prints the name of a randomly-selected image file. The directory\n" . " is searched recursively. Images smaller than " . - "${min_image_width}x${min_image_height} are excluded.\n"; + "${min_image_width}x${min_image_height} are excluded.\n" . + "\n" . + " The directory may also be the URL of an RSS/Atom feed. Enclosed\n" . + " images will be downloaded and cached locally.\n" . + "\n"; exit 1; } -sub main { +sub main() { my $dir = undef; while ($_ = $ARGV[0]) { @@ -537,11 +983,18 @@ sub main { usage unless (defined($dir)); - $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/" + $dir =~ s@^feed:@http:@si; - if (! -d $dir) { - print STDERR "$progname: $dir: not a directory\n"; - usage; + if ($dir =~ m/^https?:/si) { + # ok + } else { + $dir =~ s@^~/@$ENV{HOME}/@s; # allow literal "~/" + $dir =~ s@/+$@@s; # omit trailing / + + if (! -d $dir) { + print STDERR "$progname: $dir: not a directory or URL\n"; + usage; + } } my $file = find_random_file ($dir);
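
For illustration, here is a minimal stand-alone sketch of the per-feed cache
naming that mirror_feed() in the diff above relies on.  md5_file() is
re-stated from the diff; the feed URL and the ~/.xscreensaver-feeds location
are hypothetical examples (at run time the script probes several candidate
cache directories, preferring ~/Library/Caches on MacOS):

    #!/usr/bin/perl -w
    # Sketch: compute the cache directory a feed URL would map to.
    use strict;
    use Digest::MD5 qw(md5_base64);

    # Filename-safe variant of a Base64 MD5 digest, as in md5_file() above:
    # '/' and '+' are remapped because they are not filename-safe.
    sub md5_file($) {
      my ($s) = @_;
      $s = md5_base64 ($s);
      $s =~ s@[/]@_@gs;
      $s =~ s@[+]@-@gs;
      return $s;
    }

    my $url = 'http://www.example.com/feed.rss';   # hypothetical feed URL
    print "$ENV{HOME}/.xscreensaver-feeds/" . md5_file ($url) . "\n";

Because the digest is deterministic, re-polling the same feed always lands in
the same directory, and the .timestamp lock file inside it serializes
concurrent runs of the script.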